From 1831ff157410700d60fb89656e653f3dba742e3c Mon Sep 17 00:00:00 2001
From: Darren Shepherd <darren@rancher.com>
Date: Fri, 27 Mar 2020 10:46:20 -0700
Subject: [PATCH] Update vendor

---
 go.mod                                        |   20 +-
 go.sum                                        |   80 +-
 vendor/github.com/Azure/go-ansiterm/LICENSE   |   21 +
 vendor/github.com/Azure/go-ansiterm/README.md |   12 +
 .../github.com/Azure/go-ansiterm/constants.go |  188 ++
 .../github.com/Azure/go-ansiterm/context.go   |    7 +
 .../Azure/go-ansiterm/csi_entry_state.go      |   49 +
 .../Azure/go-ansiterm/csi_param_state.go      |   38 +
 .../go-ansiterm/escape_intermediate_state.go  |   36 +
 .../Azure/go-ansiterm/escape_state.go         |   47 +
 .../Azure/go-ansiterm/event_handler.go        |   90 +
 .../Azure/go-ansiterm/ground_state.go         |   24 +
 .../Azure/go-ansiterm/osc_string_state.go     |   31 +
 vendor/github.com/Azure/go-ansiterm/parser.go |  151 ++
 .../go-ansiterm/parser_action_helpers.go      |   99 +
 .../Azure/go-ansiterm/parser_actions.go       |  119 +
 vendor/github.com/Azure/go-ansiterm/states.go |   71 +
 .../github.com/Azure/go-ansiterm/utilities.go |   21 +
 .../Azure/go-ansiterm/winterm/ansi.go         |  182 ++
 .../Azure/go-ansiterm/winterm/api.go          |  327 +++
 .../go-ansiterm/winterm/attr_translation.go   |  100 +
 .../go-ansiterm/winterm/cursor_helpers.go     |  101 +
 .../go-ansiterm/winterm/erase_helpers.go      |   84 +
 .../go-ansiterm/winterm/scroll_helper.go      |  118 +
 .../Azure/go-ansiterm/winterm/utilities.go    |    9 +
 .../go-ansiterm/winterm/win_event_handler.go  |  743 +++++++
 vendor/github.com/MakeNowJust/heredoc/LICENSE |   21 +
 .../github.com/MakeNowJust/heredoc/README.md  |   53 +
 .../github.com/MakeNowJust/heredoc/heredoc.go |   98 +
 .../github.com/PuerkitoBio/purell/.gitignore  |    5 +
 .../github.com/PuerkitoBio/purell/.travis.yml |   12 +
 vendor/github.com/PuerkitoBio/purell/LICENSE  |   12 +
 .../github.com/PuerkitoBio/purell/README.md   |  188 ++
 .../github.com/PuerkitoBio/purell/purell.go   |  379 ++++
 .../github.com/PuerkitoBio/urlesc/.travis.yml |   15 +
 vendor/github.com/PuerkitoBio/urlesc/LICENSE  |   27 +
 .../github.com/PuerkitoBio/urlesc/README.md   |   16 +
 .../github.com/PuerkitoBio/urlesc/urlesc.go   |  180 ++
 .../docker/docker/pkg/term/ascii.go           |   66 +
 .../docker/docker/pkg/term/proxy.go           |   78 +
 .../github.com/docker/docker/pkg/term/tc.go   |   20 +
 .../github.com/docker/docker/pkg/term/term.go |  124 ++
 .../docker/docker/pkg/term/term_windows.go    |  221 ++
 .../docker/docker/pkg/term/termios_bsd.go     |   42 +
 .../docker/docker/pkg/term/termios_linux.go   |   39 +
 .../docker/pkg/term/windows/ansi_reader.go    |  263 +++
 .../docker/pkg/term/windows/ansi_writer.go    |   64 +
 .../docker/docker/pkg/term/windows/console.go |   35 +
 .../docker/docker/pkg/term/windows/windows.go |   33 +
 .../docker/docker/pkg/term/winsize.go         |   20 +
 .../docker/spdystream/CONTRIBUTING.md         |   13 +
 vendor/github.com/docker/spdystream/LICENSE   |  191 ++
 .../github.com/docker/spdystream/LICENSE.docs |  425 ++++
 .../github.com/docker/spdystream/MAINTAINERS  |   28 +
 vendor/github.com/docker/spdystream/README.md |   77 +
 .../docker/spdystream/connection.go           |  958 ++++++++
 .../github.com/docker/spdystream/handlers.go  |   38 +
 .../github.com/docker/spdystream/priority.go  |   98 +
 .../docker/spdystream/spdy/dictionary.go      |  187 ++
 .../github.com/docker/spdystream/spdy/read.go |  348 +++
 .../docker/spdystream/spdy/types.go           |  275 +++
 .../docker/spdystream/spdy/write.go           |  318 +++
 vendor/github.com/docker/spdystream/stream.go |  327 +++
 vendor/github.com/docker/spdystream/utils.go  |   16 +
 .../github.com/emicklei/go-restful/.gitignore |   70 +
 .../emicklei/go-restful/.travis.yml           |    6 +
 .../github.com/emicklei/go-restful/CHANGES.md |  273 +++
 vendor/github.com/emicklei/go-restful/LICENSE |   22 +
 .../github.com/emicklei/go-restful/Makefile   |    7 +
 .../github.com/emicklei/go-restful/README.md  |   88 +
 vendor/github.com/emicklei/go-restful/Srcfile |    1 +
 .../emicklei/go-restful/bench_test.sh         |   10 +
 .../emicklei/go-restful/compress.go           |  123 +
 .../emicklei/go-restful/compressor_cache.go   |  103 +
 .../emicklei/go-restful/compressor_pools.go   |   91 +
 .../emicklei/go-restful/compressors.go        |   54 +
 .../emicklei/go-restful/constants.go          |   30 +
 .../emicklei/go-restful/container.go          |  377 ++++
 .../emicklei/go-restful/cors_filter.go        |  202 ++
 .../emicklei/go-restful/coverage.sh           |    2 +
 .../github.com/emicklei/go-restful/curly.go   |  164 ++
 .../emicklei/go-restful/curly_route.go        |   54 +
 vendor/github.com/emicklei/go-restful/doc.go  |  185 ++
 .../emicklei/go-restful/entity_accessors.go   |  162 ++
 .../github.com/emicklei/go-restful/filter.go  |   35 +
 vendor/github.com/emicklei/go-restful/json.go |   11 +
 .../emicklei/go-restful/jsoniter.go           |   12 +
 .../github.com/emicklei/go-restful/jsr311.go  |  297 +++
 .../github.com/emicklei/go-restful/log/log.go |   34 +
 .../github.com/emicklei/go-restful/logger.go  |   32 +
 vendor/github.com/emicklei/go-restful/mime.go |   50 +
 .../emicklei/go-restful/options_filter.go     |   34 +
 .../emicklei/go-restful/parameter.go          |  143 ++
 .../emicklei/go-restful/path_expression.go    |   74 +
 .../emicklei/go-restful/path_processor.go     |   63 +
 .../github.com/emicklei/go-restful/request.go |  118 +
 .../emicklei/go-restful/response.go           |  255 +++
 .../github.com/emicklei/go-restful/route.go   |  170 ++
 .../emicklei/go-restful/route_builder.go      |  326 +++
 .../github.com/emicklei/go-restful/router.go  |   20 +
 .../emicklei/go-restful/service_error.go      |   23 +
 .../emicklei/go-restful/web_service.go        |  290 +++
 .../go-restful/web_service_container.go       |   39 +
 .../github.com/evanphx/json-patch/.travis.yml |   16 +
 vendor/github.com/evanphx/json-patch/LICENSE  |   25 +
 .../github.com/evanphx/json-patch/README.md   |  297 +++
 .../github.com/evanphx/json-patch/errors.go   |   38 +
 vendor/github.com/evanphx/json-patch/merge.go |  383 ++++
 vendor/github.com/evanphx/json-patch/patch.go |  776 +++++++
 .../exponent-io/jsonpath/.gitignore           |   24 +
 .../exponent-io/jsonpath/.travis.yml          |    5 +
 .../github.com/exponent-io/jsonpath/LICENSE   |   21 +
 .../github.com/exponent-io/jsonpath/README.md |   66 +
 .../exponent-io/jsonpath/decoder.go           |  210 ++
 .../github.com/exponent-io/jsonpath/path.go   |   67 +
 .../exponent-io/jsonpath/pathaction.go        |   61 +
 .../go-openapi/jsonpointer/.editorconfig      |   26 +
 .../go-openapi/jsonpointer/.gitignore         |    1 +
 .../go-openapi/jsonpointer/.travis.yml        |   15 +
 .../go-openapi/jsonpointer/CODE_OF_CONDUCT.md |   74 +
 .../github.com/go-openapi/jsonpointer/LICENSE |  202 ++
 .../go-openapi/jsonpointer/README.md          |   15 +
 .../github.com/go-openapi/jsonpointer/go.mod  |    9 +
 .../github.com/go-openapi/jsonpointer/go.sum  |   24 +
 .../go-openapi/jsonpointer/pointer.go         |  390 ++++
 .../go-openapi/jsonreference/.gitignore       |    1 +
 .../go-openapi/jsonreference/.travis.yml      |   15 +
 .../jsonreference/CODE_OF_CONDUCT.md          |   74 +
 .../go-openapi/jsonreference/LICENSE          |  202 ++
 .../go-openapi/jsonreference/README.md        |   15 +
 .../go-openapi/jsonreference/go.mod           |   12 +
 .../go-openapi/jsonreference/go.sum           |   44 +
 .../go-openapi/jsonreference/reference.go     |  156 ++
 .../github.com/go-openapi/spec/.editorconfig  |   26 +
 vendor/github.com/go-openapi/spec/.gitignore  |    2 +
 .../github.com/go-openapi/spec/.golangci.yml  |   23 +
 vendor/github.com/go-openapi/spec/.travis.yml |   15 +
 .../go-openapi/spec/CODE_OF_CONDUCT.md        |   74 +
 vendor/github.com/go-openapi/spec/LICENSE     |  202 ++
 vendor/github.com/go-openapi/spec/README.md   |   10 +
 vendor/github.com/go-openapi/spec/bindata.go  |  297 +++
 vendor/github.com/go-openapi/spec/cache.go    |   60 +
 .../go-openapi/spec/contact_info.go           |   24 +
 vendor/github.com/go-openapi/spec/debug.go    |   47 +
 vendor/github.com/go-openapi/spec/expander.go |  650 ++++++
 .../go-openapi/spec/external_docs.go          |   24 +
 vendor/github.com/go-openapi/spec/go.mod      |   17 +
 vendor/github.com/go-openapi/spec/go.sum      |   74 +
 vendor/github.com/go-openapi/spec/header.go   |  197 ++
 vendor/github.com/go-openapi/spec/info.go     |  165 ++
 vendor/github.com/go-openapi/spec/items.go    |  244 ++
 vendor/github.com/go-openapi/spec/license.go  |   23 +
 .../github.com/go-openapi/spec/normalizer.go  |  152 ++
 .../github.com/go-openapi/spec/operation.go   |  398 ++++
 .../github.com/go-openapi/spec/parameter.go   |  321 +++
 .../github.com/go-openapi/spec/path_item.go   |   87 +
 vendor/github.com/go-openapi/spec/paths.go    |   97 +
 vendor/github.com/go-openapi/spec/ref.go      |  191 ++
 vendor/github.com/go-openapi/spec/response.go |  131 ++
 .../github.com/go-openapi/spec/responses.go   |  127 ++
 vendor/github.com/go-openapi/spec/schema.go   |  596 +++++
 .../go-openapi/spec/schema_loader.go          |  276 +++
 .../go-openapi/spec/security_scheme.go        |  140 ++
 vendor/github.com/go-openapi/spec/spec.go     |   86 +
 vendor/github.com/go-openapi/spec/swagger.go  |  448 ++++
 vendor/github.com/go-openapi/spec/tag.go      |   75 +
 vendor/github.com/go-openapi/spec/unused.go   |  174 ++
 .../github.com/go-openapi/spec/xml_object.go  |   68 +
 .../github.com/go-openapi/swag/.editorconfig  |   26 +
 vendor/github.com/go-openapi/swag/.gitignore  |    4 +
 .../github.com/go-openapi/swag/.golangci.yml  |   22 +
 vendor/github.com/go-openapi/swag/.travis.yml |   15 +
 .../go-openapi/swag/CODE_OF_CONDUCT.md        |   74 +
 vendor/github.com/go-openapi/swag/LICENSE     |  202 ++
 vendor/github.com/go-openapi/swag/README.md   |   22 +
 vendor/github.com/go-openapi/swag/convert.go  |  208 ++
 .../go-openapi/swag/convert_types.go          |  595 +++++
 vendor/github.com/go-openapi/swag/doc.go      |   32 +
 vendor/github.com/go-openapi/swag/go.mod      |   14 +
 vendor/github.com/go-openapi/swag/go.sum      |   20 +
 vendor/github.com/go-openapi/swag/json.go     |  312 +++
 vendor/github.com/go-openapi/swag/loading.go  |   80 +
 .../github.com/go-openapi/swag/name_lexem.go  |   87 +
 vendor/github.com/go-openapi/swag/net.go      |   38 +
 vendor/github.com/go-openapi/swag/path.go     |   59 +
 .../github.com/go-openapi/swag/post_go18.go   |   23 +
 .../github.com/go-openapi/swag/post_go19.go   |   67 +
 vendor/github.com/go-openapi/swag/pre_go18.go |   23 +
 vendor/github.com/go-openapi/swag/pre_go19.go |   69 +
 vendor/github.com/go-openapi/swag/split.go    |  262 +++
 vendor/github.com/go-openapi/swag/util.go     |  385 ++++
 vendor/github.com/go-openapi/swag/yaml.go     |  246 ++
 vendor/github.com/google/btree/.travis.yml    |    1 +
 vendor/github.com/google/btree/LICENSE        |  202 ++
 vendor/github.com/google/btree/README.md      |   12 +
 vendor/github.com/google/btree/btree.go       |  890 ++++++++
 vendor/github.com/google/gofuzz/README.md     |    2 +-
 vendor/github.com/google/gofuzz/fuzz.go       |   35 +-
 .../gregjones/httpcache/.travis.yml           |   18 +
 .../gregjones/httpcache/LICENSE.txt           |    7 +
 .../github.com/gregjones/httpcache/README.md  |   25 +
 .../httpcache/diskcache/diskcache.go          |   61 +
 .../gregjones/httpcache/httpcache.go          |  551 +++++
 .../inconshreveable/mousetrap/LICENSE         |   13 +
 .../inconshreveable/mousetrap/README.md       |   23 +
 .../inconshreveable/mousetrap/trap_others.go  |   15 +
 .../inconshreveable/mousetrap/trap_windows.go |   98 +
 .../mousetrap/trap_windows_1.4.go             |   46 +
 .../github.com/liggitt/tabwriter/.travis.yml  |   11 +
 vendor/github.com/liggitt/tabwriter/LICENSE   |   27 +
 vendor/github.com/liggitt/tabwriter/README.md |    7 +
 .../github.com/liggitt/tabwriter/tabwriter.go |  637 ++++++
 vendor/github.com/mailru/easyjson/LICENSE     |    7 +
 .../github.com/mailru/easyjson/buffer/pool.go |  270 +++
 .../mailru/easyjson/jlexer/bytestostr.go      |   24 +
 .../easyjson/jlexer/bytestostr_nounsafe.go    |   13 +
 .../mailru/easyjson/jlexer/error.go           |   15 +
 .../mailru/easyjson/jlexer/lexer.go           | 1182 ++++++++++
 .../mailru/easyjson/jwriter/writer.go         |  390 ++++
 .../mitchellh/go-wordwrap/LICENSE.md          |   21 +
 .../mitchellh/go-wordwrap/README.md           |   39 +
 .../github.com/mitchellh/go-wordwrap/go.mod   |    1 +
 .../mitchellh/go-wordwrap/wordwrap.go         |   73 +
 vendor/github.com/peterbourgon/diskv/LICENSE  |   19 +
 .../github.com/peterbourgon/diskv/README.md   |  141 ++
 .../peterbourgon/diskv/compression.go         |   64 +
 vendor/github.com/peterbourgon/diskv/diskv.go |  624 ++++++
 vendor/github.com/peterbourgon/diskv/index.go |  115 +
 .../norman/objectclient/object_client.go      |   23 +-
 .../rancher/norman/types/server_types.go      |   10 +
 .../russross/blackfriday/.gitignore           |    8 +
 .../russross/blackfriday/.travis.yml          |   17 +
 .../russross/blackfriday/LICENSE.txt          |   29 +
 .../github.com/russross/blackfriday/README.md |  369 +++
 .../github.com/russross/blackfriday/block.go  | 1474 ++++++++++++
 vendor/github.com/russross/blackfriday/doc.go |   32 +
 vendor/github.com/russross/blackfriday/go.mod |    1 +
 .../github.com/russross/blackfriday/html.go   |  938 ++++++++
 .../github.com/russross/blackfriday/inline.go | 1154 ++++++++++
 .../github.com/russross/blackfriday/latex.go  |  334 +++
 .../russross/blackfriday/markdown.go          |  941 ++++++++
 .../russross/blackfriday/smartypants.go       |  430 ++++
 vendor/github.com/spf13/cobra/.gitignore      |   38 +
 vendor/github.com/spf13/cobra/.mailmap        |    3 +
 vendor/github.com/spf13/cobra/.travis.yml     |   31 +
 vendor/github.com/spf13/cobra/LICENSE.txt     |  174 ++
 vendor/github.com/spf13/cobra/README.md       |  741 ++++++
 vendor/github.com/spf13/cobra/args.go         |  101 +
 .../spf13/cobra/bash_completions.go           |  547 +++++
 .../spf13/cobra/bash_completions.md           |  256 +++
 vendor/github.com/spf13/cobra/cobra.go        |  207 ++
 vendor/github.com/spf13/cobra/command.go      | 1594 +++++++++++++
 .../github.com/spf13/cobra/command_notwin.go  |    5 +
 vendor/github.com/spf13/cobra/command_win.go  |   26 +
 vendor/github.com/spf13/cobra/go.mod          |   13 +
 vendor/github.com/spf13/cobra/go.sum          |   51 +
 .../spf13/cobra/powershell_completions.go     |  100 +
 .../spf13/cobra/powershell_completions.md     |   14 +
 .../spf13/cobra/shell_completions.go          |   85 +
 .../github.com/spf13/cobra/zsh_completions.go |  336 +++
 .../github.com/spf13/cobra/zsh_completions.md |   39 +
 vendor/golang.org/x/crypto/blowfish/block.go  |  159 ++
 vendor/golang.org/x/crypto/blowfish/cipher.go |   99 +
 vendor/golang.org/x/crypto/blowfish/const.go  |  199 ++
 .../golang.org/x/crypto/poly1305/sum_arm.go   |   19 -
 vendor/golang.org/x/crypto/poly1305/sum_arm.s |  427 ----
 .../golang.org/x/crypto/poly1305/sum_noasm.go |    2 +-
 vendor/golang.org/x/crypto/ssh/certs.go       |   35 +-
 .../ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go |   93 +
 vendor/golang.org/x/crypto/ssh/keys.go        |  474 +++-
 vendor/golang.org/x/crypto/ssh/server.go      |    4 +-
 .../x/crypto/ssh/terminal/terminal.go         |   17 +-
 .../x/crypto/ssh/terminal/util_windows.go     |    4 +-
 vendor/golang.org/x/text/encoding/encoding.go |  335 +++
 .../internal/identifier/identifier.go         |   81 +
 .../text/encoding/internal/identifier/mib.go  | 1619 ++++++++++++++
 .../x/text/encoding/internal/internal.go      |   75 +
 .../x/text/encoding/unicode/override.go       |   82 +
 .../x/text/encoding/unicode/unicode.go        |  434 ++++
 .../internal/utf8internal/utf8internal.go     |   87 +
 vendor/golang.org/x/text/runes/cond.go        |  187 ++
 vendor/golang.org/x/text/runes/runes.go       |  355 +++
 vendor/golang.org/x/text/width/kind_string.go |   28 +
 .../golang.org/x/text/width/tables10.0.0.go   | 1318 +++++++++++
 .../golang.org/x/text/width/tables11.0.0.go   | 1330 +++++++++++
 vendor/golang.org/x/text/width/tables9.0.0.go | 1286 +++++++++++
 vendor/golang.org/x/text/width/transform.go   |  239 ++
 vendor/golang.org/x/text/width/trieval.go     |   30 +
 vendor/golang.org/x/text/width/width.go       |  206 ++
 .../grpc/internal/internal.go                 |    2 +-
 vendor/gopkg.in/yaml.v2/.travis.yml           |   18 +-
 vendor/gopkg.in/yaml.v2/scannerc.go           |  109 +-
 vendor/gopkg.in/yaml.v2/yaml.go               |    2 +-
 vendor/gopkg.in/yaml.v2/yamlh.go              |    1 +
 vendor/k8s.io/api/admission/v1/doc.go         |   23 +
 .../k8s.io/api/admission/v1/generated.pb.go   | 1743 +++++++++++++++
 .../k8s.io/api/admission/v1/generated.proto   |  160 ++
 vendor/k8s.io/api/admission/v1/register.go    |   51 +
 vendor/k8s.io/api/admission/v1/types.go       |  162 ++
 .../v1/types_swagger_doc_generated.go         |   77 +
 .../api/admission/v1/zz_generated.deepcopy.go |  136 ++
 vendor/k8s.io/api/admission/v1beta1/doc.go    |   23 +
 .../api/admission/v1beta1/generated.pb.go     | 1743 +++++++++++++++
 .../api/admission/v1beta1/generated.proto     |  160 ++
 .../k8s.io/api/admission/v1beta1/register.go  |   51 +
 vendor/k8s.io/api/admission/v1beta1/types.go  |  162 ++
 .../v1beta1/types_swagger_doc_generated.go    |   77 +
 .../v1beta1/zz_generated.deepcopy.go          |  136 ++
 .../admissionregistration/v1/generated.pb.go  |   60 +-
 .../v1beta1/generated.pb.go                   |   60 +-
 vendor/k8s.io/api/apps/v1/generated.pb.go     |   60 +-
 .../k8s.io/api/apps/v1beta1/generated.pb.go   |   60 +-
 .../k8s.io/api/apps/v1beta2/generated.pb.go   |   60 +-
 .../v1alpha1/generated.pb.go                  |   60 +-
 .../api/authentication/v1/generated.pb.go     |   60 +-
 vendor/k8s.io/api/authentication/v1/types.go  |    2 +-
 .../authentication/v1beta1/generated.pb.go    |   60 +-
 .../api/authentication/v1beta1/types.go       |    2 +-
 .../api/authorization/v1/generated.pb.go      |   60 +-
 vendor/k8s.io/api/authorization/v1/types.go   |    8 +-
 .../api/authorization/v1beta1/generated.pb.go |   60 +-
 .../k8s.io/api/authorization/v1beta1/types.go |    8 +-
 .../k8s.io/api/autoscaling/v1/generated.pb.go |   60 +-
 .../api/autoscaling/v2beta1/generated.pb.go   |   60 +-
 .../api/autoscaling/v2beta2/generated.pb.go   | 1038 +++++++--
 .../api/autoscaling/v2beta2/generated.proto   |   66 +
 .../k8s.io/api/autoscaling/v2beta2/types.go   |   84 +
 .../v2beta2/types_swagger_doc_generated.go    |   33 +
 .../v2beta2/zz_generated.deepcopy.go          |   78 +
 vendor/k8s.io/api/batch/v1/generated.pb.go    |   60 +-
 .../k8s.io/api/batch/v1beta1/generated.pb.go  |   60 +-
 .../k8s.io/api/batch/v2alpha1/generated.pb.go |   60 +-
 .../api/certificates/v1beta1/generated.pb.go  |  210 +-
 .../api/certificates/v1beta1/generated.proto  |   13 +
 .../k8s.io/api/certificates/v1beta1/types.go  |   35 +
 .../v1beta1/types_swagger_doc_generated.go    |   15 +-
 .../v1beta1/zz_generated.deepcopy.go          |    5 +
 .../api/coordination/v1/generated.pb.go       |   60 +-
 .../api/coordination/v1beta1/generated.pb.go  |   60 +-
 vendor/k8s.io/api/core/v1/generated.pb.go     | 1977 +++++++++--------
 vendor/k8s.io/api/core/v1/generated.proto     |   88 +-
 vendor/k8s.io/api/core/v1/resource.go         |    8 +
 vendor/k8s.io/api/core/v1/types.go            |  113 +-
 .../core/v1/types_swagger_doc_generated.go    |   65 +-
 .../k8s.io/api/core/v1/well_known_taints.go   |    7 -
 .../api/core/v1/zz_generated.deepcopy.go      |   33 +-
 .../api/discovery/v1alpha1/generated.pb.go    |   60 +-
 .../api/discovery/v1beta1/generated.pb.go     |   60 +-
 .../api/discovery/v1beta1/generated.proto     |    5 +-
 vendor/k8s.io/api/discovery/v1beta1/types.go  |    5 +-
 .../v1beta1/types_swagger_doc_generated.go    |    2 +-
 .../k8s.io/api/events/v1beta1/generated.pb.go |   60 +-
 .../api/extensions/v1beta1/generated.pb.go    |  815 ++++---
 .../api/extensions/v1beta1/generated.proto    |   93 +-
 .../k8s.io/api/extensions/v1beta1/register.go |    1 -
 vendor/k8s.io/api/extensions/v1beta1/types.go |  130 +-
 .../v1beta1/types_swagger_doc_generated.go    |   33 +-
 .../v1beta1/zz_generated.deepcopy.go          |   48 +-
 .../api/flowcontrol/v1alpha1/generated.pb.go  |   60 +-
 .../api/flowcontrol/v1alpha1/generated.proto  |   16 +-
 .../k8s.io/api/flowcontrol/v1alpha1/types.go  |   26 +-
 .../v1alpha1/types_swagger_doc_generated.go   |    2 +-
 vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go |   23 +
 .../api/imagepolicy/v1alpha1/generated.pb.go  | 1386 ++++++++++++
 .../api/imagepolicy/v1alpha1/generated.proto  |   86 +
 .../api/imagepolicy/v1alpha1/register.go      |   51 +
 .../k8s.io/api/imagepolicy/v1alpha1/types.go  |   80 +
 .../v1alpha1/types_swagger_doc_generated.go   |   71 +
 .../v1alpha1/zz_generated.deepcopy.go         |  120 +
 .../k8s.io/api/networking/v1/generated.pb.go  |   60 +-
 .../k8s.io/api/networking/v1/generated.proto  |   10 +-
 vendor/k8s.io/api/networking/v1/types.go      |   10 +-
 .../v1/types_swagger_doc_generated.go         |    6 +-
 .../api/networking/v1beta1/generated.pb.go    |  991 ++++++++-
 .../api/networking/v1beta1/generated.proto    |  134 +-
 .../k8s.io/api/networking/v1beta1/register.go |    2 +
 vendor/k8s.io/api/networking/v1beta1/types.go |  176 +-
 .../v1beta1/types_swagger_doc_generated.go    |   51 +-
 .../v1beta1/well_known_annotations.go         |   32 +
 .../v1beta1/zz_generated.deepcopy.go          |  105 +-
 .../k8s.io/api/node/v1alpha1/generated.pb.go  |   60 +-
 .../k8s.io/api/node/v1beta1/generated.pb.go   |   60 +-
 .../k8s.io/api/policy/v1beta1/generated.pb.go |  302 ++-
 .../k8s.io/api/policy/v1beta1/generated.proto |    2 +-
 vendor/k8s.io/api/policy/v1beta1/types.go     |    4 +-
 .../v1beta1/types_swagger_doc_generated.go    |    2 +-
 vendor/k8s.io/api/rbac/v1/generated.pb.go     |   60 +-
 .../k8s.io/api/rbac/v1alpha1/generated.pb.go  |   60 +-
 .../k8s.io/api/rbac/v1alpha1/generated.proto  |    1 -
 vendor/k8s.io/api/rbac/v1alpha1/types.go      |    1 -
 .../v1alpha1/types_swagger_doc_generated.go   |    2 +-
 .../k8s.io/api/rbac/v1beta1/generated.pb.go   |   60 +-
 .../k8s.io/api/scheduling/v1/generated.pb.go  |   60 +-
 .../api/scheduling/v1alpha1/generated.pb.go   |   60 +-
 .../api/scheduling/v1beta1/generated.pb.go    |   60 +-
 .../api/settings/v1alpha1/generated.pb.go     |   60 +-
 vendor/k8s.io/api/storage/v1/generated.pb.go  |  933 ++++++--
 vendor/k8s.io/api/storage/v1/generated.proto  |   91 +
 vendor/k8s.io/api/storage/v1/register.go      |    3 +
 vendor/k8s.io/api/storage/v1/types.go         |  126 ++
 .../storage/v1/types_swagger_doc_generated.go |   31 +
 .../api/storage/v1/zz_generated.deepcopy.go   |   91 +
 .../api/storage/v1alpha1/generated.pb.go      |   60 +-
 .../api/storage/v1beta1/generated.pb.go       |   60 +-
 .../apimachinery/pkg/api/equality/semantic.go |   49 +
 .../pkg/api/resource/generated.pb.go          |    2 +-
 .../apimachinery/pkg/api/resource/math.go     |    8 +-
 .../apimachinery/pkg/api/resource/quantity.go |    5 +
 .../apimachinery/pkg/api/validation/doc.go    |   18 +
 .../pkg/api/validation/generic.go             |   85 +
 .../pkg/api/validation/objectmeta.go          |  263 +++
 .../pkg/apis/meta/internalversion/register.go |   19 +-
 .../pkg/apis/meta/v1/conversion.go            |   59 -
 .../apimachinery/pkg/apis/meta/v1/duration.go |    5 +
 .../pkg/apis/meta/v1/generated.pb.go          |   60 +-
 .../apimachinery/pkg/apis/meta/v1/helpers.go  |    4 +-
 .../apimachinery/pkg/apis/meta/v1/register.go |   17 +-
 .../apimachinery/pkg/apis/meta/v1/time.go     |   10 +
 .../unstructured/unstructuredscheme/scheme.go |  133 ++
 .../pkg/apis/meta/v1/validation/validation.go |  186 ++
 .../pkg/apis/meta/v1beta1/generated.pb.go     |  101 +-
 .../pkg/apis/meta/v1beta1/generated.proto     |    1 -
 .../pkg/apis/meta/v1beta1/register.go         |   18 +-
 .../apimachinery/pkg/conversion/converter.go  |  155 +-
 .../apimachinery/pkg/runtime/conversion.go    |   32 +-
 .../apimachinery/pkg/runtime/converter.go     |  122 +-
 .../apimachinery/pkg/runtime/embedded.go      |   15 +-
 .../apimachinery/pkg/runtime/generated.pb.go  |   60 +-
 .../pkg/runtime/schema/generated.pb.go        |    2 +-
 .../k8s.io/apimachinery/pkg/runtime/scheme.go |   45 +-
 .../apimachinery/pkg/util/clock/clock.go      |   29 +-
 .../pkg/util/duration/duration.go             |   89 +
 .../apimachinery/pkg/util/errors/errors.go    |   32 +-
 .../apimachinery/pkg/util/httpstream/doc.go   |   19 +
 .../pkg/util/httpstream/httpstream.go         |  145 ++
 .../pkg/util/httpstream/spdy/connection.go    |  145 ++
 .../pkg/util/httpstream/spdy/roundtripper.go  |  335 +++
 .../pkg/util/httpstream/spdy/upgrade.go       |  107 +
 .../pkg/util/intstr/generated.pb.go           |   60 +-
 .../apimachinery/pkg/util/intstr/intstr.go    |    3 +-
 .../k8s.io/apimachinery/pkg/util/net/http.go  |   53 +-
 .../pkg/util/remotecommand/constants.go       |   53 +
 .../pkg/util/validation/validation.go         |   38 +
 .../k8s.io/apimachinery/pkg/util/wait/wait.go |  137 +-
 vendor/k8s.io/apimachinery/pkg/watch/watch.go |   10 +-
 .../third_party/forked/golang/netutil/addr.go |   27 +
 .../apiserver/pkg/apis/apiserver/types.go     |   67 +-
 .../pkg/apis/apiserver/v1alpha1/types.go      |   78 +-
 .../v1alpha1/zz_generated.conversion.go       |  126 +-
 .../v1alpha1/zz_generated.deepcopy.go         |   79 +-
 .../apis/apiserver/zz_generated.deepcopy.go   |   79 +-
 .../pkg/apis/audit/v1/generated.pb.go         |   60 +-
 .../k8s.io/apiserver/pkg/apis/config/types.go |   14 +-
 .../apiserver/pkg/apis/config/v1/defaults.go  |   44 +
 .../apiserver/pkg/apis/config/v1/register.go  |    1 +
 .../apiserver/pkg/apis/config/v1/types.go     |   14 +-
 .../apis/config/v1/zz_generated.conversion.go |    4 +-
 .../apis/config/v1/zz_generated.deepcopy.go   |    5 +
 .../apis/config/v1/zz_generated.defaults.go   |   13 +
 .../pkg/apis/config/zz_generated.deepcopy.go  |    5 +
 vendor/k8s.io/cli-runtime/LICENSE             |  202 ++
 .../pkg/genericclioptions/builder_flags.go    |  220 ++
 .../genericclioptions/builder_flags_fake.go   |   54 +
 .../pkg/genericclioptions/config_flags.go     |  382 ++++
 .../genericclioptions/config_flags_fake.go    |  110 +
 .../cli-runtime/pkg/genericclioptions/doc.go  |   19 +
 .../pkg/genericclioptions/filename_flags.go   |   79 +
 .../pkg/genericclioptions/io_options.go       |   57 +
 .../pkg/genericclioptions/json_yaml_flags.go  |   68 +
 .../pkg/genericclioptions/jsonpath_flags.go   |  130 ++
 .../genericclioptions/kube_template_flags.go  |   89 +
 .../pkg/genericclioptions/name_flags.go       |   81 +
 .../pkg/genericclioptions/print_flags.go      |  158 ++
 .../pkg/genericclioptions/record_flags.go     |  199 ++
 .../pkg/genericclioptions/template_flags.go   |  135 ++
 .../cli-runtime/pkg/kustomize/builder.go      |   32 +
 .../configmapandsecret/configmapfactory.go    |  125 ++
 .../k8sdeps/configmapandsecret/kv.go          |  107 +
 .../configmapandsecret/secretfactory.go       |  106 +
 .../cli-runtime/pkg/kustomize/k8sdeps/doc.go  |   76 +
 .../pkg/kustomize/k8sdeps/factory.go          |   34 +
 .../kustomize/k8sdeps/kunstruct/factory.go    |  118 +
 .../pkg/kustomize/k8sdeps/kunstruct/helper.go |   71 +
 .../kustomize/k8sdeps/kunstruct/kunstruct.go  |   92 +
 .../pkg/kustomize/k8sdeps/kv/kv.go            |  102 +
 .../kustomize/k8sdeps/transformer/factory.go  |   43 +
 .../k8sdeps/transformer/hash/hash.go          |  184 ++
 .../k8sdeps/transformer/hash/namehash.go      |   47 +
 .../k8sdeps/transformer/patch/patch.go        |  174 ++
 .../patch/patchconflictdetector.go            |  137 ++
 .../kustomize/k8sdeps/validator/validators.go |   61 +
 .../cli-runtime/pkg/printers/discard.go       |   30 +
 vendor/k8s.io/cli-runtime/pkg/printers/doc.go |   19 +
 .../cli-runtime/pkg/printers/interface.go     |   54 +
 .../k8s.io/cli-runtime/pkg/printers/json.go   |  142 ++
 .../cli-runtime/pkg/printers/jsonpath.go      |  147 ++
 .../k8s.io/cli-runtime/pkg/printers/name.go   |  130 ++
 .../cli-runtime/pkg/printers/sourcechecker.go |   60 +
 .../cli-runtime/pkg/printers/tableprinter.go  |  573 +++++
 .../cli-runtime/pkg/printers/tabwriter.go     |   36 +
 .../cli-runtime/pkg/printers/template.go      |  118 +
 .../cli-runtime/pkg/printers/typesetter.go    |   95 +
 .../cli-runtime/pkg/resource/builder.go       | 1193 ++++++++++
 .../k8s.io/cli-runtime/pkg/resource/client.go |   58 +
 .../cli-runtime/pkg/resource/crd_finder.go    |  110 +
 vendor/k8s.io/cli-runtime/pkg/resource/doc.go |   24 +
 .../pkg/resource/dry_run_verifier.go          |  121 +
 .../k8s.io/cli-runtime/pkg/resource/fake.go   |   40 +
 .../k8s.io/cli-runtime/pkg/resource/helper.go |  228 ++
 .../cli-runtime/pkg/resource/interfaces.go    |  103 +
 .../k8s.io/cli-runtime/pkg/resource/mapper.go |  161 ++
 .../pkg/resource/metadata_decoder.go          |   59 +
 .../k8s.io/cli-runtime/pkg/resource/result.go |  242 ++
 .../k8s.io/cli-runtime/pkg/resource/scheme.go |   82 +
 .../cli-runtime/pkg/resource/selector.go      |  118 +
 .../cli-runtime/pkg/resource/visitor.go       |  764 +++++++
 .../discovery/cached/disk/cached_discovery.go |  300 +++
 .../discovery/cached/disk/round_tripper.go    |   65 +
 .../client-go/discovery/discovery_client.go   |   13 +-
 vendor/k8s.io/client-go/dynamic/interface.go  |   61 +
 vendor/k8s.io/client-go/dynamic/scheme.go     |  108 +
 vendor/k8s.io/client-go/dynamic/simple.go     |  327 +++
 .../k8s.io/client-go/kubernetes/clientset.go  |    2 +-
 .../v1/mutatingwebhookconfiguration.go        |   64 +-
 .../v1/validatingwebhookconfiguration.go      |   64 +-
 .../v1beta1/mutatingwebhookconfiguration.go   |   64 +-
 .../v1beta1/validatingwebhookconfiguration.go |   64 +-
 .../typed/apps/v1/controllerrevision.go       |   64 +-
 .../kubernetes/typed/apps/v1/daemonset.go     |   72 +-
 .../kubernetes/typed/apps/v1/deployment.go    |   85 +-
 .../kubernetes/typed/apps/v1/replicaset.go    |   85 +-
 .../kubernetes/typed/apps/v1/statefulset.go   |   85 +-
 .../typed/apps/v1beta1/controllerrevision.go  |   64 +-
 .../typed/apps/v1beta1/deployment.go          |   72 +-
 .../typed/apps/v1beta1/statefulset.go         |   72 +-
 .../typed/apps/v1beta2/controllerrevision.go  |   64 +-
 .../typed/apps/v1beta2/daemonset.go           |   72 +-
 .../typed/apps/v1beta2/deployment.go          |   72 +-
 .../typed/apps/v1beta2/replicaset.go          |   72 +-
 .../typed/apps/v1beta2/statefulset.go         |   85 +-
 .../auditregistration/v1alpha1/auditsink.go   |   64 +-
 .../authentication/v1/generated_expansion.go  |    2 +
 .../typed/authentication/v1/tokenreview.go    |   18 +
 .../v1/tokenreview_expansion.go               |   43 -
 .../v1beta1/generated_expansion.go            |    2 +
 .../authentication/v1beta1/tokenreview.go     |   18 +
 .../v1beta1/tokenreview_expansion.go          |   43 -
 .../authorization/v1/generated_expansion.go   |    8 +
 .../v1/localsubjectaccessreview.go            |   19 +
 .../v1/localsubjectaccessreview_expansion.go  |   44 -
 .../v1/selfsubjectaccessreview.go             |   18 +
 .../v1/selfsubjectaccessreview_expansion.go   |   43 -
 .../v1/selfsubjectrulesreview.go              |   18 +
 .../v1/selfsubjectrulesreview_expansion.go    |   43 -
 .../authorization/v1/subjectaccessreview.go   |   18 +
 .../v1/subjectaccessreview_expansion.go       |   44 -
 .../v1beta1/generated_expansion.go            |    8 +
 .../v1beta1/localsubjectaccessreview.go       |   19 +
 .../localsubjectaccessreview_expansion.go     |   44 -
 .../v1beta1/selfsubjectaccessreview.go        |   18 +
 .../selfsubjectaccessreview_expansion.go      |   43 -
 .../v1beta1/selfsubjectrulesreview.go         |   18 +
 .../selfsubjectrulesreview_expansion.go       |   43 -
 .../v1beta1/subjectaccessreview.go            |   18 +
 .../v1beta1/subjectaccessreview_expansion.go  |   44 -
 .../autoscaling/v1/horizontalpodautoscaler.go |   72 +-
 .../v2beta1/horizontalpodautoscaler.go        |   72 +-
 .../v2beta2/horizontalpodautoscaler.go        |   72 +-
 .../kubernetes/typed/batch/v1/job.go          |   72 +-
 .../kubernetes/typed/batch/v1beta1/cronjob.go |   72 +-
 .../typed/batch/v2alpha1/cronjob.go           |   72 +-
 .../v1beta1/certificatesigningrequest.go      |   72 +-
 .../certificatesigningrequest_expansion.go    |   11 +-
 .../kubernetes/typed/coordination/v1/lease.go |   64 +-
 .../typed/coordination/v1beta1/lease.go       |   64 +-
 .../typed/core/v1/componentstatus.go          |   64 +-
 .../kubernetes/typed/core/v1/configmap.go     |   64 +-
 .../kubernetes/typed/core/v1/endpoints.go     |   64 +-
 .../kubernetes/typed/core/v1/event.go         |   64 +-
 .../typed/core/v1/event_expansion.go          |   11 +-
 .../typed/core/v1/generated_expansion.go      |    2 +
 .../kubernetes/typed/core/v1/limitrange.go    |   64 +-
 .../kubernetes/typed/core/v1/namespace.go     |   58 +-
 .../typed/core/v1/namespace_expansion.go      |   14 +-
 .../kubernetes/typed/core/v1/node.go          |   72 +-
 .../typed/core/v1/node_expansion.go           |   10 +-
 .../typed/core/v1/persistentvolume.go         |   72 +-
 .../typed/core/v1/persistentvolumeclaim.go    |   72 +-
 .../client-go/kubernetes/typed/core/v1/pod.go |   85 +-
 .../kubernetes/typed/core/v1/pod_expansion.go |   17 +-
 .../kubernetes/typed/core/v1/podtemplate.go   |   64 +-
 .../typed/core/v1/replicationcontroller.go    |   85 +-
 .../kubernetes/typed/core/v1/resourcequota.go |   72 +-
 .../kubernetes/typed/core/v1/secret.go        |   64 +-
 .../kubernetes/typed/core/v1/service.go       |   58 +-
 .../typed/core/v1/serviceaccount.go           |   82 +-
 .../typed/core/v1/serviceaccount_expansion.go |   41 -
 .../typed/discovery/v1alpha1/endpointslice.go |   64 +-
 .../typed/discovery/v1beta1/endpointslice.go  |   64 +-
 .../kubernetes/typed/events/v1beta1/event.go  |   64 +-
 .../typed/events/v1beta1/event_expansion.go   |    7 +-
 .../typed/extensions/v1beta1/daemonset.go     |   72 +-
 .../typed/extensions/v1beta1/deployment.go    |   85 +-
 .../v1beta1/deployment_expansion.go           |   14 +-
 .../typed/extensions/v1beta1/ingress.go       |   72 +-
 .../typed/extensions/v1beta1/networkpolicy.go |   64 +-
 .../extensions/v1beta1/podsecuritypolicy.go   |   64 +-
 .../typed/extensions/v1beta1/replicaset.go    |   85 +-
 .../typed/flowcontrol/v1alpha1/flowschema.go  |   72 +-
 .../v1alpha1/prioritylevelconfiguration.go    |   72 +-
 .../typed/networking/v1/networkpolicy.go      |   64 +-
 .../networking/v1beta1/generated_expansion.go |    2 +
 .../typed/networking/v1beta1/ingress.go       |   72 +-
 .../typed/networking/v1beta1/ingressclass.go  |  168 ++
 .../networking/v1beta1/networking_client.go   |    5 +
 .../typed/node/v1alpha1/runtimeclass.go       |   64 +-
 .../typed/node/v1beta1/runtimeclass.go        |   64 +-
 .../policy/v1beta1/eviction_expansion.go      |    8 +-
 .../policy/v1beta1/poddisruptionbudget.go     |   72 +-
 .../typed/policy/v1beta1/podsecuritypolicy.go |   64 +-
 .../kubernetes/typed/rbac/v1/clusterrole.go   |   64 +-
 .../typed/rbac/v1/clusterrolebinding.go       |   64 +-
 .../kubernetes/typed/rbac/v1/role.go          |   64 +-
 .../kubernetes/typed/rbac/v1/rolebinding.go   |   64 +-
 .../typed/rbac/v1alpha1/clusterrole.go        |   64 +-
 .../typed/rbac/v1alpha1/clusterrolebinding.go |   64 +-
 .../kubernetes/typed/rbac/v1alpha1/role.go    |   64 +-
 .../typed/rbac/v1alpha1/rolebinding.go        |   64 +-
 .../typed/rbac/v1beta1/clusterrole.go         |   64 +-
 .../typed/rbac/v1beta1/clusterrolebinding.go  |   64 +-
 .../kubernetes/typed/rbac/v1beta1/role.go     |   64 +-
 .../typed/rbac/v1beta1/rolebinding.go         |   64 +-
 .../typed/scheduling/v1/priorityclass.go      |   64 +-
 .../scheduling/v1alpha1/priorityclass.go      |   64 +-
 .../typed/scheduling/v1beta1/priorityclass.go |   64 +-
 .../typed/settings/v1alpha1/podpreset.go      |   64 +-
 .../kubernetes/typed/storage/v1/csidriver.go  |  168 ++
 .../kubernetes/typed/storage/v1/csinode.go    |   64 +-
 .../typed/storage/v1/generated_expansion.go   |    2 +
 .../typed/storage/v1/storage_client.go        |    5 +
 .../typed/storage/v1/storageclass.go          |   64 +-
 .../typed/storage/v1/volumeattachment.go      |   72 +-
 .../storage/v1alpha1/volumeattachment.go      |   72 +-
 .../typed/storage/v1beta1/csidriver.go        |   64 +-
 .../typed/storage/v1beta1/csinode.go          |   64 +-
 .../typed/storage/v1beta1/storageclass.go     |   64 +-
 .../typed/storage/v1beta1/volumeattachment.go |   72 +-
 .../plugin/pkg/client/auth/exec/exec.go       |   27 +-
 .../plugin/pkg/client/auth/exec/metrics.go    |   60 +
 vendor/k8s.io/client-go/rest/request.go       |  177 +-
 .../restmapper/category_expansion.go          |  119 +
 .../k8s.io/client-go/restmapper/discovery.go  |  338 +++
 .../k8s.io/client-go/restmapper/shortcut.go   |  172 ++
 vendor/k8s.io/client-go/scale/client.go       |  238 ++
 vendor/k8s.io/client-go/scale/doc.go          |   21 +
 vendor/k8s.io/client-go/scale/interfaces.go   |   47 +
 .../client-go/scale/scheme/appsint/doc.go     |   22 +
 .../scale/scheme/appsint/register.go          |   55 +
 .../scale/scheme/appsv1beta1/conversion.go    |   73 +
 .../client-go/scale/scheme/appsv1beta1/doc.go |   20 +
 .../scale/scheme/appsv1beta1/register.go      |   45 +
 .../appsv1beta1/zz_generated.conversion.go    |  133 ++
 .../scale/scheme/appsv1beta2/conversion.go    |   73 +
 .../client-go/scale/scheme/appsv1beta2/doc.go |   20 +
 .../scale/scheme/appsv1beta2/register.go      |   45 +
 .../appsv1beta2/zz_generated.conversion.go    |  133 ++
 .../scale/scheme/autoscalingv1/conversion.go  |   54 +
 .../scale/scheme/autoscalingv1/doc.go         |   20 +
 .../scale/scheme/autoscalingv1/register.go    |   45 +
 .../autoscalingv1/zz_generated.conversion.go  |  132 ++
 vendor/k8s.io/client-go/scale/scheme/doc.go   |   22 +
 .../scale/scheme/extensionsint/doc.go         |   22 +
 .../scale/scheme/extensionsint/register.go    |   55 +
 .../scheme/extensionsv1beta1/conversion.go    |   73 +
 .../scale/scheme/extensionsv1beta1/doc.go     |   20 +
 .../scheme/extensionsv1beta1/register.go      |   45 +
 .../zz_generated.conversion.go                |  133 ++
 .../k8s.io/client-go/scale/scheme/register.go |   54 +
 vendor/k8s.io/client-go/scale/scheme/types.go |   60 +
 .../scale/scheme/zz_generated.deepcopy.go     |   91 +
 vendor/k8s.io/client-go/scale/util.go         |  197 ++
 .../forked/golang/template/exec.go            |   94 +
 .../forked/golang/template/funcs.go           |  599 +++++
 .../client-go/tools/cache/controller.go       |   61 +-
 .../client-go/tools/cache/delta_fifo.go       |  156 +-
 .../client-go/tools/cache/expiration_cache.go |    4 +-
 vendor/k8s.io/client-go/tools/cache/fifo.go   |   56 +-
 vendor/k8s.io/client-go/tools/cache/index.go  |   15 +-
 .../k8s.io/client-go/tools/cache/listwatch.go |   10 +-
 .../tools/cache/mutation_detector.go          |   30 +-
 .../k8s.io/client-go/tools/cache/reflector.go |  179 +-
 .../client-go/tools/cache/shared_informer.go  |  196 +-
 vendor/k8s.io/client-go/tools/cache/store.go  |   46 +-
 .../tools/cache/thread_safe_store.go          |   45 +-
 .../client-go/tools/clientcmd/api/types.go    |    3 +
 .../client-go/tools/clientcmd/api/v1/types.go |    3 +
 .../api/v1/zz_generated.conversion.go         |    2 +
 .../tools/clientcmd/client_config.go          |   20 +-
 .../client-go/tools/clientcmd/overrides.go    |    4 +
 .../client-go/tools/clientcmd/validation.go   |   34 +-
 .../k8s.io/client-go/tools/metrics/metrics.go |   52 +-
 vendor/k8s.io/client-go/tools/pager/pager.go  |   21 +-
 .../client-go/tools/remotecommand/doc.go      |   20 +
 .../tools/remotecommand/errorstream.go        |   55 +
 .../client-go/tools/remotecommand/reader.go   |   41 +
 .../tools/remotecommand/remotecommand.go      |  142 ++
 .../client-go/tools/remotecommand/resize.go   |   33 +
 .../client-go/tools/remotecommand/v1.go       |  160 ++
 .../client-go/tools/remotecommand/v2.go       |  195 ++
 .../client-go/tools/remotecommand/v3.go       |  111 +
 .../client-go/tools/remotecommand/v4.go       |  119 +
 vendor/k8s.io/client-go/transport/cache.go    |   29 +-
 .../client-go/transport/cert_rotation.go      |  176 ++
 vendor/k8s.io/client-go/transport/config.go   |    7 +-
 .../k8s.io/client-go/transport/spdy/spdy.go   |   94 +
 .../k8s.io/client-go/transport/transport.go   |   60 +-
 .../util/connrotation/connrotation.go         |   10 +-
 vendor/k8s.io/client-go/util/exec/exec.go     |   52 +
 vendor/k8s.io/client-go/util/jsonpath/doc.go  |   20 +
 .../client-go/util/jsonpath/jsonpath.go       |  525 +++++
 vendor/k8s.io/client-go/util/jsonpath/node.go |  256 +++
 .../k8s.io/client-go/util/jsonpath/parser.go  |  524 +++++
 vendor/k8s.io/client-go/util/retry/util.go    |  105 -
 .../util/workqueue/default_rate_limiters.go   |   48 +
 .../client-go/util/workqueue/metrics.go       |   11 +-
 vendor/k8s.io/component-base/LICENSE          |  202 ++
 .../component-base/version/.gitattributes     |    1 +
 vendor/k8s.io/component-base/version/base.go  |   63 +
 vendor/k8s.io/component-base/version/def.bzl  |   39 +
 .../k8s.io/component-base/version/version.go  |   42 +
 .../k8s.io/kube-openapi/pkg/common/common.go  |  192 ++
 vendor/k8s.io/kube-openapi/pkg/common/doc.go  |   19 +
 .../pkg/util/proto/validation/errors.go       |   79 +
 .../pkg/util/proto/validation/types.go        |  299 +++
 .../pkg/util/proto/validation/validation.go   |   30 +
 vendor/k8s.io/kubectl/pkg/cmd/util/factory.go |   66 +
 .../pkg/cmd/util/factory_client_access.go     |  177 ++
 vendor/k8s.io/kubectl/pkg/cmd/util/helpers.go |  728 ++++++
 .../pkg/cmd/util/kubectl_match_version.go     |  129 ++
 .../k8s.io/kubectl/pkg/cmd/util/printing.go   |   29 +
 vendor/k8s.io/kubectl/pkg/drain/cordon.go     |   16 +-
 vendor/k8s.io/kubectl/pkg/drain/default.go    |    2 +-
 vendor/k8s.io/kubectl/pkg/drain/drain.go      |  168 +-
 vendor/k8s.io/kubectl/pkg/drain/filters.go    |   23 +-
 vendor/k8s.io/kubectl/pkg/scheme/install.go   |   83 +
 vendor/k8s.io/kubectl/pkg/scheme/scheme.go    |   39 +
 .../kubectl/pkg/util/interrupt/interrupt.go   |  104 +
 .../retry => kubectl/pkg/util/openapi}/OWNERS |    4 +-
 vendor/k8s.io/kubectl/pkg/util/openapi/doc.go |   21 +
 .../kubectl/pkg/util/openapi/extensions.go    |   27 +
 .../kubectl/pkg/util/openapi/openapi.go       |  128 ++
 .../pkg/util/openapi/openapi_getter.go        |   65 +
 .../pkg/util/openapi/validation/validation.go |  140 ++
 .../pkg/util/templates/command_groups.go      |   59 +
 .../kubectl/pkg/util/templates/markdown.go    |  147 ++
 .../kubectl/pkg/util/templates/normalizers.go |   97 +
 .../kubectl/pkg/util/templates/templater.go   |  298 +++
 .../kubectl/pkg/util/templates/templates.go   |  103 +
 vendor/k8s.io/kubectl/pkg/util/term/resize.go |  132 ++
 .../kubectl/pkg/util/term/resizeevents.go     |   61 +
 .../pkg/util/term/resizeevents_windows.go     |   62 +
 vendor/k8s.io/kubectl/pkg/util/term/term.go   |  110 +
 .../kubectl/pkg/util/term/term_writer.go      |  124 ++
 .../k8s.io/kubectl/pkg/validation/schema.go   |  103 +
 vendor/k8s.io/utils/exec/doc.go               |   18 +
 vendor/k8s.io/utils/exec/exec.go              |  252 +++
 vendor/modules.txt                            |  169 +-
 vendor/sigs.k8s.io/kustomize/LICENSE          |  201 ++
 .../kustomize/pkg/commands/build/build.go     |  129 ++
 .../kustomize/pkg/constants/constants.go      |   28 +
 .../kustomize/pkg/expansion/expand.go         |  121 +
 .../kustomize/pkg/factory/factory.go          |   39 +
 .../kustomize/pkg/fs/confirmeddir.go          |   93 +
 .../sigs.k8s.io/kustomize/pkg/fs/fakefile.go  |   69 +
 .../kustomize/pkg/fs/fakefileinfo.go          |   47 +
 vendor/sigs.k8s.io/kustomize/pkg/fs/fakefs.go |  185 ++
 vendor/sigs.k8s.io/kustomize/pkg/fs/fs.go     |   44 +
 .../sigs.k8s.io/kustomize/pkg/fs/realfile.go  |   40 +
 vendor/sigs.k8s.io/kustomize/pkg/fs/realfs.go |  122 +
 .../sigs.k8s.io/kustomize/pkg/git/cloner.go   |   75 +
 .../sigs.k8s.io/kustomize/pkg/git/repospec.go |  214 ++
 vendor/sigs.k8s.io/kustomize/pkg/gvk/gvk.go   |  180 ++
 vendor/sigs.k8s.io/kustomize/pkg/ifc/ifc.go   |   73 +
 .../kustomize/pkg/ifc/transformer/factory.go  |   29 +
 .../kustomize/pkg/image/deprecatedimage.go    |   32 +
 .../sigs.k8s.io/kustomize/pkg/image/image.go  |   36 +
 .../pkg/internal/error/configmaperror.go      |   30 +
 .../pkg/internal/error/kustomizationerror.go  |   61 +
 .../pkg/internal/error/patcherror.go          |   32 +
 .../pkg/internal/error/resourceerror.go       |   30 +
 .../pkg/internal/error/secreterror.go         |   30 +
 .../pkg/internal/error/yamlformaterror.go     |   48 +
 .../kustomize/pkg/loader/fileloader.go        |  312 +++
 .../kustomize/pkg/loader/loader.go            |   39 +
 .../kustomize/pkg/patch/json6902.go           |   40 +
 .../kustomize/pkg/patch/strategicmerge.go     |   40 +
 .../pkg/patch/transformer/factory.go          |   83 +
 .../patch/transformer/patchjson6902json.go    |  108 +
 .../sigs.k8s.io/kustomize/pkg/resid/resid.go  |  207 ++
 .../kustomize/pkg/resmap/factory.go           |  123 +
 .../kustomize/pkg/resmap/idslice.go           |   37 +
 .../kustomize/pkg/resmap/resmap.go            |  200 ++
 .../kustomize/pkg/resource/factory.go         |  148 ++
 .../kustomize/pkg/resource/resource.go        |  107 +
 .../kustomize/pkg/target/kusttarget.go        |  315 +++
 .../kustomize/pkg/target/resaccumulator.go    |  161 ++
 .../config/defaultconfig/commonannotations.go |   60 +
 .../config/defaultconfig/commonlabels.go      |  162 ++
 .../config/defaultconfig/defaultconfig.go     |   49 +
 .../config/defaultconfig/nameprefix.go        |   24 +
 .../config/defaultconfig/namereference.go     |  317 +++
 .../config/defaultconfig/namespace.go         |   25 +
 .../config/defaultconfig/varreference.go      |  162 ++
 .../pkg/transformers/config/factory.go        |   87 +
 .../pkg/transformers/config/factorycrd.go     |  201 ++
 .../pkg/transformers/config/fieldspec.go      |  139 ++
 .../transformers/config/namebackreferences.go |  105 +
 .../transformers/config/transformerconfig.go  |  134 ++
 .../kustomize/pkg/transformers/image.go       |  171 ++
 .../pkg/transformers/labelsandannotations.go  |   86 +
 .../pkg/transformers/multitransformer.go      |   95 +
 .../kustomize/pkg/transformers/mutatefield.go |   81 +
 .../pkg/transformers/namereference.go         |  144 ++
 .../kustomize/pkg/transformers/namespace.go   |  121 +
 .../pkg/transformers/nooptransformer.go       |   34 +
 .../pkg/transformers/prefixsuffixname.go      |  109 +
 .../kustomize/pkg/transformers/refvars.go     |   94 +
 .../kustomize/pkg/transformers/transformer.go |   26 +
 .../kustomize/pkg/types/genargs.go            |   64 +
 .../kustomize/pkg/types/generationbehavior.go |   59 +
 .../kustomize/pkg/types/kustomization.go      |  250 +++
 vendor/sigs.k8s.io/kustomize/pkg/types/var.go |  145 ++
 .../structured-merge-diff/v3/LICENSE          |  201 ++
 .../v3/value/allocator.go                     |  203 ++
 .../structured-merge-diff/v3/value/doc.go     |   21 +
 .../structured-merge-diff/v3/value/fields.go  |   97 +
 .../v3/value/jsontagutil.go                   |   91 +
 .../structured-merge-diff/v3/value/list.go    |  139 ++
 .../v3/value/listreflect.go                   |   98 +
 .../v3/value/listunstructured.go              |   74 +
 .../structured-merge-diff/v3/value/map.go     |  270 +++
 .../v3/value/mapreflect.go                    |  209 ++
 .../v3/value/mapunstructured.go               |  190 ++
 .../v3/value/reflectcache.go                  |  463 ++++
 .../structured-merge-diff/v3/value/scalar.go  |   50 +
 .../v3/value/structreflect.go                 |  208 ++
 .../structured-merge-diff/v3/value/value.go   |  347 +++
 .../v3/value/valuereflect.go                  |  294 +++
 .../v3/value/valueunstructured.go             |  178 ++
 vendor/sigs.k8s.io/yaml/.travis.yml           |   15 +-
 vendor/sigs.k8s.io/yaml/OWNERS                |    2 +
 vendor/sigs.k8s.io/yaml/README.md             |   14 +-
 vendor/sigs.k8s.io/yaml/go.mod                |    8 +
 vendor/sigs.k8s.io/yaml/go.sum                |    9 +
 vendor/sigs.k8s.io/yaml/yaml.go               |   61 +
 855 files changed, 98173 insertions(+), 8779 deletions(-)
 create mode 100644 vendor/github.com/Azure/go-ansiterm/LICENSE
 create mode 100644 vendor/github.com/Azure/go-ansiterm/README.md
 create mode 100644 vendor/github.com/Azure/go-ansiterm/constants.go
 create mode 100644 vendor/github.com/Azure/go-ansiterm/context.go
 create mode 100644 vendor/github.com/Azure/go-ansiterm/csi_entry_state.go
 create mode 100644 vendor/github.com/Azure/go-ansiterm/csi_param_state.go
 create mode 100644 vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go
 create mode 100644 vendor/github.com/Azure/go-ansiterm/escape_state.go
 create mode 100644 vendor/github.com/Azure/go-ansiterm/event_handler.go
 create mode 100644 vendor/github.com/Azure/go-ansiterm/ground_state.go
 create mode 100644 vendor/github.com/Azure/go-ansiterm/osc_string_state.go
 create mode 100644 vendor/github.com/Azure/go-ansiterm/parser.go
 create mode 100644 vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go
 create mode 100644 vendor/github.com/Azure/go-ansiterm/parser_actions.go
 create mode 100644 vendor/github.com/Azure/go-ansiterm/states.go
 create mode 100644 vendor/github.com/Azure/go-ansiterm/utilities.go
 create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/ansi.go
 create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/api.go
 create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go
 create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go
 create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go
 create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go
 create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/utilities.go
 create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go
 create mode 100644 vendor/github.com/MakeNowJust/heredoc/LICENSE
 create mode 100644 vendor/github.com/MakeNowJust/heredoc/README.md
 create mode 100644 vendor/github.com/MakeNowJust/heredoc/heredoc.go
 create mode 100644 vendor/github.com/PuerkitoBio/purell/.gitignore
 create mode 100644 vendor/github.com/PuerkitoBio/purell/.travis.yml
 create mode 100644 vendor/github.com/PuerkitoBio/purell/LICENSE
 create mode 100644 vendor/github.com/PuerkitoBio/purell/README.md
 create mode 100644 vendor/github.com/PuerkitoBio/purell/purell.go
 create mode 100644 vendor/github.com/PuerkitoBio/urlesc/.travis.yml
 create mode 100644 vendor/github.com/PuerkitoBio/urlesc/LICENSE
 create mode 100644 vendor/github.com/PuerkitoBio/urlesc/README.md
 create mode 100644 vendor/github.com/PuerkitoBio/urlesc/urlesc.go
 create mode 100644 vendor/github.com/docker/docker/pkg/term/ascii.go
 create mode 100644 vendor/github.com/docker/docker/pkg/term/proxy.go
 create mode 100644 vendor/github.com/docker/docker/pkg/term/tc.go
 create mode 100644 vendor/github.com/docker/docker/pkg/term/term.go
 create mode 100644 vendor/github.com/docker/docker/pkg/term/term_windows.go
 create mode 100644 vendor/github.com/docker/docker/pkg/term/termios_bsd.go
 create mode 100644 vendor/github.com/docker/docker/pkg/term/termios_linux.go
 create mode 100644 vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go
 create mode 100644 vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go
 create mode 100644 vendor/github.com/docker/docker/pkg/term/windows/console.go
 create mode 100644 vendor/github.com/docker/docker/pkg/term/windows/windows.go
 create mode 100644 vendor/github.com/docker/docker/pkg/term/winsize.go
 create mode 100644 vendor/github.com/docker/spdystream/CONTRIBUTING.md
 create mode 100644 vendor/github.com/docker/spdystream/LICENSE
 create mode 100644 vendor/github.com/docker/spdystream/LICENSE.docs
 create mode 100644 vendor/github.com/docker/spdystream/MAINTAINERS
 create mode 100644 vendor/github.com/docker/spdystream/README.md
 create mode 100644 vendor/github.com/docker/spdystream/connection.go
 create mode 100644 vendor/github.com/docker/spdystream/handlers.go
 create mode 100644 vendor/github.com/docker/spdystream/priority.go
 create mode 100644 vendor/github.com/docker/spdystream/spdy/dictionary.go
 create mode 100644 vendor/github.com/docker/spdystream/spdy/read.go
 create mode 100644 vendor/github.com/docker/spdystream/spdy/types.go
 create mode 100644 vendor/github.com/docker/spdystream/spdy/write.go
 create mode 100644 vendor/github.com/docker/spdystream/stream.go
 create mode 100644 vendor/github.com/docker/spdystream/utils.go
 create mode 100644 vendor/github.com/emicklei/go-restful/.gitignore
 create mode 100644 vendor/github.com/emicklei/go-restful/.travis.yml
 create mode 100644 vendor/github.com/emicklei/go-restful/CHANGES.md
 create mode 100644 vendor/github.com/emicklei/go-restful/LICENSE
 create mode 100644 vendor/github.com/emicklei/go-restful/Makefile
 create mode 100644 vendor/github.com/emicklei/go-restful/README.md
 create mode 100644 vendor/github.com/emicklei/go-restful/Srcfile
 create mode 100644 vendor/github.com/emicklei/go-restful/bench_test.sh
 create mode 100644 vendor/github.com/emicklei/go-restful/compress.go
 create mode 100644 vendor/github.com/emicklei/go-restful/compressor_cache.go
 create mode 100644 vendor/github.com/emicklei/go-restful/compressor_pools.go
 create mode 100644 vendor/github.com/emicklei/go-restful/compressors.go
 create mode 100644 vendor/github.com/emicklei/go-restful/constants.go
 create mode 100644 vendor/github.com/emicklei/go-restful/container.go
 create mode 100644 vendor/github.com/emicklei/go-restful/cors_filter.go
 create mode 100644 vendor/github.com/emicklei/go-restful/coverage.sh
 create mode 100644 vendor/github.com/emicklei/go-restful/curly.go
 create mode 100644 vendor/github.com/emicklei/go-restful/curly_route.go
 create mode 100644 vendor/github.com/emicklei/go-restful/doc.go
 create mode 100644 vendor/github.com/emicklei/go-restful/entity_accessors.go
 create mode 100644 vendor/github.com/emicklei/go-restful/filter.go
 create mode 100644 vendor/github.com/emicklei/go-restful/json.go
 create mode 100644 vendor/github.com/emicklei/go-restful/jsoniter.go
 create mode 100644 vendor/github.com/emicklei/go-restful/jsr311.go
 create mode 100644 vendor/github.com/emicklei/go-restful/log/log.go
 create mode 100644 vendor/github.com/emicklei/go-restful/logger.go
 create mode 100644 vendor/github.com/emicklei/go-restful/mime.go
 create mode 100644 vendor/github.com/emicklei/go-restful/options_filter.go
 create mode 100644 vendor/github.com/emicklei/go-restful/parameter.go
 create mode 100644 vendor/github.com/emicklei/go-restful/path_expression.go
 create mode 100644 vendor/github.com/emicklei/go-restful/path_processor.go
 create mode 100644 vendor/github.com/emicklei/go-restful/request.go
 create mode 100644 vendor/github.com/emicklei/go-restful/response.go
 create mode 100644 vendor/github.com/emicklei/go-restful/route.go
 create mode 100644 vendor/github.com/emicklei/go-restful/route_builder.go
 create mode 100644 vendor/github.com/emicklei/go-restful/router.go
 create mode 100644 vendor/github.com/emicklei/go-restful/service_error.go
 create mode 100644 vendor/github.com/emicklei/go-restful/web_service.go
 create mode 100644 vendor/github.com/emicklei/go-restful/web_service_container.go
 create mode 100644 vendor/github.com/evanphx/json-patch/.travis.yml
 create mode 100644 vendor/github.com/evanphx/json-patch/LICENSE
 create mode 100644 vendor/github.com/evanphx/json-patch/README.md
 create mode 100644 vendor/github.com/evanphx/json-patch/errors.go
 create mode 100644 vendor/github.com/evanphx/json-patch/merge.go
 create mode 100644 vendor/github.com/evanphx/json-patch/patch.go
 create mode 100644 vendor/github.com/exponent-io/jsonpath/.gitignore
 create mode 100644 vendor/github.com/exponent-io/jsonpath/.travis.yml
 create mode 100644 vendor/github.com/exponent-io/jsonpath/LICENSE
 create mode 100644 vendor/github.com/exponent-io/jsonpath/README.md
 create mode 100644 vendor/github.com/exponent-io/jsonpath/decoder.go
 create mode 100644 vendor/github.com/exponent-io/jsonpath/path.go
 create mode 100644 vendor/github.com/exponent-io/jsonpath/pathaction.go
 create mode 100644 vendor/github.com/go-openapi/jsonpointer/.editorconfig
 create mode 100644 vendor/github.com/go-openapi/jsonpointer/.gitignore
 create mode 100644 vendor/github.com/go-openapi/jsonpointer/.travis.yml
 create mode 100644 vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md
 create mode 100644 vendor/github.com/go-openapi/jsonpointer/LICENSE
 create mode 100644 vendor/github.com/go-openapi/jsonpointer/README.md
 create mode 100644 vendor/github.com/go-openapi/jsonpointer/go.mod
 create mode 100644 vendor/github.com/go-openapi/jsonpointer/go.sum
 create mode 100644 vendor/github.com/go-openapi/jsonpointer/pointer.go
 create mode 100644 vendor/github.com/go-openapi/jsonreference/.gitignore
 create mode 100644 vendor/github.com/go-openapi/jsonreference/.travis.yml
 create mode 100644 vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md
 create mode 100644 vendor/github.com/go-openapi/jsonreference/LICENSE
 create mode 100644 vendor/github.com/go-openapi/jsonreference/README.md
 create mode 100644 vendor/github.com/go-openapi/jsonreference/go.mod
 create mode 100644 vendor/github.com/go-openapi/jsonreference/go.sum
 create mode 100644 vendor/github.com/go-openapi/jsonreference/reference.go
 create mode 100644 vendor/github.com/go-openapi/spec/.editorconfig
 create mode 100644 vendor/github.com/go-openapi/spec/.gitignore
 create mode 100644 vendor/github.com/go-openapi/spec/.golangci.yml
 create mode 100644 vendor/github.com/go-openapi/spec/.travis.yml
 create mode 100644 vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md
 create mode 100644 vendor/github.com/go-openapi/spec/LICENSE
 create mode 100644 vendor/github.com/go-openapi/spec/README.md
 create mode 100644 vendor/github.com/go-openapi/spec/bindata.go
 create mode 100644 vendor/github.com/go-openapi/spec/cache.go
 create mode 100644 vendor/github.com/go-openapi/spec/contact_info.go
 create mode 100644 vendor/github.com/go-openapi/spec/debug.go
 create mode 100644 vendor/github.com/go-openapi/spec/expander.go
 create mode 100644 vendor/github.com/go-openapi/spec/external_docs.go
 create mode 100644 vendor/github.com/go-openapi/spec/go.mod
 create mode 100644 vendor/github.com/go-openapi/spec/go.sum
 create mode 100644 vendor/github.com/go-openapi/spec/header.go
 create mode 100644 vendor/github.com/go-openapi/spec/info.go
 create mode 100644 vendor/github.com/go-openapi/spec/items.go
 create mode 100644 vendor/github.com/go-openapi/spec/license.go
 create mode 100644 vendor/github.com/go-openapi/spec/normalizer.go
 create mode 100644 vendor/github.com/go-openapi/spec/operation.go
 create mode 100644 vendor/github.com/go-openapi/spec/parameter.go
 create mode 100644 vendor/github.com/go-openapi/spec/path_item.go
 create mode 100644 vendor/github.com/go-openapi/spec/paths.go
 create mode 100644 vendor/github.com/go-openapi/spec/ref.go
 create mode 100644 vendor/github.com/go-openapi/spec/response.go
 create mode 100644 vendor/github.com/go-openapi/spec/responses.go
 create mode 100644 vendor/github.com/go-openapi/spec/schema.go
 create mode 100644 vendor/github.com/go-openapi/spec/schema_loader.go
 create mode 100644 vendor/github.com/go-openapi/spec/security_scheme.go
 create mode 100644 vendor/github.com/go-openapi/spec/spec.go
 create mode 100644 vendor/github.com/go-openapi/spec/swagger.go
 create mode 100644 vendor/github.com/go-openapi/spec/tag.go
 create mode 100644 vendor/github.com/go-openapi/spec/unused.go
 create mode 100644 vendor/github.com/go-openapi/spec/xml_object.go
 create mode 100644 vendor/github.com/go-openapi/swag/.editorconfig
 create mode 100644 vendor/github.com/go-openapi/swag/.gitignore
 create mode 100644 vendor/github.com/go-openapi/swag/.golangci.yml
 create mode 100644 vendor/github.com/go-openapi/swag/.travis.yml
 create mode 100644 vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md
 create mode 100644 vendor/github.com/go-openapi/swag/LICENSE
 create mode 100644 vendor/github.com/go-openapi/swag/README.md
 create mode 100644 vendor/github.com/go-openapi/swag/convert.go
 create mode 100644 vendor/github.com/go-openapi/swag/convert_types.go
 create mode 100644 vendor/github.com/go-openapi/swag/doc.go
 create mode 100644 vendor/github.com/go-openapi/swag/go.mod
 create mode 100644 vendor/github.com/go-openapi/swag/go.sum
 create mode 100644 vendor/github.com/go-openapi/swag/json.go
 create mode 100644 vendor/github.com/go-openapi/swag/loading.go
 create mode 100644 vendor/github.com/go-openapi/swag/name_lexem.go
 create mode 100644 vendor/github.com/go-openapi/swag/net.go
 create mode 100644 vendor/github.com/go-openapi/swag/path.go
 create mode 100644 vendor/github.com/go-openapi/swag/post_go18.go
 create mode 100644 vendor/github.com/go-openapi/swag/post_go19.go
 create mode 100644 vendor/github.com/go-openapi/swag/pre_go18.go
 create mode 100644 vendor/github.com/go-openapi/swag/pre_go19.go
 create mode 100644 vendor/github.com/go-openapi/swag/split.go
 create mode 100644 vendor/github.com/go-openapi/swag/util.go
 create mode 100644 vendor/github.com/go-openapi/swag/yaml.go
 create mode 100644 vendor/github.com/google/btree/.travis.yml
 create mode 100644 vendor/github.com/google/btree/LICENSE
 create mode 100644 vendor/github.com/google/btree/README.md
 create mode 100644 vendor/github.com/google/btree/btree.go
 create mode 100644 vendor/github.com/gregjones/httpcache/.travis.yml
 create mode 100644 vendor/github.com/gregjones/httpcache/LICENSE.txt
 create mode 100644 vendor/github.com/gregjones/httpcache/README.md
 create mode 100644 vendor/github.com/gregjones/httpcache/diskcache/diskcache.go
 create mode 100644 vendor/github.com/gregjones/httpcache/httpcache.go
 create mode 100644 vendor/github.com/inconshreveable/mousetrap/LICENSE
 create mode 100644 vendor/github.com/inconshreveable/mousetrap/README.md
 create mode 100644 vendor/github.com/inconshreveable/mousetrap/trap_others.go
 create mode 100644 vendor/github.com/inconshreveable/mousetrap/trap_windows.go
 create mode 100644 vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go
 create mode 100644 vendor/github.com/liggitt/tabwriter/.travis.yml
 create mode 100644 vendor/github.com/liggitt/tabwriter/LICENSE
 create mode 100644 vendor/github.com/liggitt/tabwriter/README.md
 create mode 100644 vendor/github.com/liggitt/tabwriter/tabwriter.go
 create mode 100644 vendor/github.com/mailru/easyjson/LICENSE
 create mode 100644 vendor/github.com/mailru/easyjson/buffer/pool.go
 create mode 100644 vendor/github.com/mailru/easyjson/jlexer/bytestostr.go
 create mode 100644 vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go
 create mode 100644 vendor/github.com/mailru/easyjson/jlexer/error.go
 create mode 100644 vendor/github.com/mailru/easyjson/jlexer/lexer.go
 create mode 100644 vendor/github.com/mailru/easyjson/jwriter/writer.go
 create mode 100644 vendor/github.com/mitchellh/go-wordwrap/LICENSE.md
 create mode 100644 vendor/github.com/mitchellh/go-wordwrap/README.md
 create mode 100644 vendor/github.com/mitchellh/go-wordwrap/go.mod
 create mode 100644 vendor/github.com/mitchellh/go-wordwrap/wordwrap.go
 create mode 100644 vendor/github.com/peterbourgon/diskv/LICENSE
 create mode 100644 vendor/github.com/peterbourgon/diskv/README.md
 create mode 100644 vendor/github.com/peterbourgon/diskv/compression.go
 create mode 100644 vendor/github.com/peterbourgon/diskv/diskv.go
 create mode 100644 vendor/github.com/peterbourgon/diskv/index.go
 create mode 100644 vendor/github.com/russross/blackfriday/.gitignore
 create mode 100644 vendor/github.com/russross/blackfriday/.travis.yml
 create mode 100644 vendor/github.com/russross/blackfriday/LICENSE.txt
 create mode 100644 vendor/github.com/russross/blackfriday/README.md
 create mode 100644 vendor/github.com/russross/blackfriday/block.go
 create mode 100644 vendor/github.com/russross/blackfriday/doc.go
 create mode 100644 vendor/github.com/russross/blackfriday/go.mod
 create mode 100644 vendor/github.com/russross/blackfriday/html.go
 create mode 100644 vendor/github.com/russross/blackfriday/inline.go
 create mode 100644 vendor/github.com/russross/blackfriday/latex.go
 create mode 100644 vendor/github.com/russross/blackfriday/markdown.go
 create mode 100644 vendor/github.com/russross/blackfriday/smartypants.go
 create mode 100644 vendor/github.com/spf13/cobra/.gitignore
 create mode 100644 vendor/github.com/spf13/cobra/.mailmap
 create mode 100644 vendor/github.com/spf13/cobra/.travis.yml
 create mode 100644 vendor/github.com/spf13/cobra/LICENSE.txt
 create mode 100644 vendor/github.com/spf13/cobra/README.md
 create mode 100644 vendor/github.com/spf13/cobra/args.go
 create mode 100644 vendor/github.com/spf13/cobra/bash_completions.go
 create mode 100644 vendor/github.com/spf13/cobra/bash_completions.md
 create mode 100644 vendor/github.com/spf13/cobra/cobra.go
 create mode 100644 vendor/github.com/spf13/cobra/command.go
 create mode 100644 vendor/github.com/spf13/cobra/command_notwin.go
 create mode 100644 vendor/github.com/spf13/cobra/command_win.go
 create mode 100644 vendor/github.com/spf13/cobra/go.mod
 create mode 100644 vendor/github.com/spf13/cobra/go.sum
 create mode 100644 vendor/github.com/spf13/cobra/powershell_completions.go
 create mode 100644 vendor/github.com/spf13/cobra/powershell_completions.md
 create mode 100644 vendor/github.com/spf13/cobra/shell_completions.go
 create mode 100644 vendor/github.com/spf13/cobra/zsh_completions.go
 create mode 100644 vendor/github.com/spf13/cobra/zsh_completions.md
 create mode 100644 vendor/golang.org/x/crypto/blowfish/block.go
 create mode 100644 vendor/golang.org/x/crypto/blowfish/cipher.go
 create mode 100644 vendor/golang.org/x/crypto/blowfish/const.go
 delete mode 100644 vendor/golang.org/x/crypto/poly1305/sum_arm.go
 delete mode 100644 vendor/golang.org/x/crypto/poly1305/sum_arm.s
 create mode 100644 vendor/golang.org/x/crypto/ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go
 create mode 100644 vendor/golang.org/x/text/encoding/encoding.go
 create mode 100644 vendor/golang.org/x/text/encoding/internal/identifier/identifier.go
 create mode 100644 vendor/golang.org/x/text/encoding/internal/identifier/mib.go
 create mode 100644 vendor/golang.org/x/text/encoding/internal/internal.go
 create mode 100644 vendor/golang.org/x/text/encoding/unicode/override.go
 create mode 100644 vendor/golang.org/x/text/encoding/unicode/unicode.go
 create mode 100644 vendor/golang.org/x/text/internal/utf8internal/utf8internal.go
 create mode 100644 vendor/golang.org/x/text/runes/cond.go
 create mode 100644 vendor/golang.org/x/text/runes/runes.go
 create mode 100644 vendor/golang.org/x/text/width/kind_string.go
 create mode 100644 vendor/golang.org/x/text/width/tables10.0.0.go
 create mode 100644 vendor/golang.org/x/text/width/tables11.0.0.go
 create mode 100644 vendor/golang.org/x/text/width/tables9.0.0.go
 create mode 100644 vendor/golang.org/x/text/width/transform.go
 create mode 100644 vendor/golang.org/x/text/width/trieval.go
 create mode 100644 vendor/golang.org/x/text/width/width.go
 create mode 100644 vendor/k8s.io/api/admission/v1/doc.go
 create mode 100644 vendor/k8s.io/api/admission/v1/generated.pb.go
 create mode 100644 vendor/k8s.io/api/admission/v1/generated.proto
 create mode 100644 vendor/k8s.io/api/admission/v1/register.go
 create mode 100644 vendor/k8s.io/api/admission/v1/types.go
 create mode 100644 vendor/k8s.io/api/admission/v1/types_swagger_doc_generated.go
 create mode 100644 vendor/k8s.io/api/admission/v1/zz_generated.deepcopy.go
 create mode 100644 vendor/k8s.io/api/admission/v1beta1/doc.go
 create mode 100644 vendor/k8s.io/api/admission/v1beta1/generated.pb.go
 create mode 100644 vendor/k8s.io/api/admission/v1beta1/generated.proto
 create mode 100644 vendor/k8s.io/api/admission/v1beta1/register.go
 create mode 100644 vendor/k8s.io/api/admission/v1beta1/types.go
 create mode 100644 vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go
 create mode 100644 vendor/k8s.io/api/admission/v1beta1/zz_generated.deepcopy.go
 create mode 100644 vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go
 create mode 100644 vendor/k8s.io/api/imagepolicy/v1alpha1/generated.pb.go
 create mode 100644 vendor/k8s.io/api/imagepolicy/v1alpha1/generated.proto
 create mode 100644 vendor/k8s.io/api/imagepolicy/v1alpha1/register.go
 create mode 100644 vendor/k8s.io/api/imagepolicy/v1alpha1/types.go
 create mode 100644 vendor/k8s.io/api/imagepolicy/v1alpha1/types_swagger_doc_generated.go
 create mode 100644 vendor/k8s.io/api/imagepolicy/v1alpha1/zz_generated.deepcopy.go
 create mode 100644 vendor/k8s.io/api/networking/v1beta1/well_known_annotations.go
 create mode 100644 vendor/k8s.io/apimachinery/pkg/api/equality/semantic.go
 create mode 100644 vendor/k8s.io/apimachinery/pkg/api/validation/doc.go
 create mode 100644 vendor/k8s.io/apimachinery/pkg/api/validation/generic.go
 create mode 100644 vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go
 create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructuredscheme/scheme.go
 create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go
 create mode 100644 vendor/k8s.io/apimachinery/pkg/util/duration/duration.go
 create mode 100644 vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go
 create mode 100644 vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go
 create mode 100644 vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go
 create mode 100644 vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go
 create mode 100644 vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go
 create mode 100644 vendor/k8s.io/apimachinery/pkg/util/remotecommand/constants.go
 create mode 100644 vendor/k8s.io/apimachinery/third_party/forked/golang/netutil/addr.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/apis/config/v1/defaults.go
 create mode 100644 vendor/k8s.io/cli-runtime/LICENSE
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/genericclioptions/builder_flags.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/genericclioptions/builder_flags_fake.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/genericclioptions/config_flags.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/genericclioptions/config_flags_fake.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/genericclioptions/doc.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/genericclioptions/filename_flags.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/genericclioptions/io_options.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/genericclioptions/json_yaml_flags.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/genericclioptions/jsonpath_flags.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/genericclioptions/kube_template_flags.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/genericclioptions/name_flags.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/genericclioptions/print_flags.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/genericclioptions/record_flags.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/genericclioptions/template_flags.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/kustomize/builder.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/configmapandsecret/configmapfactory.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/configmapandsecret/kv.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/configmapandsecret/secretfactory.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/doc.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/factory.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/kunstruct/factory.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/kunstruct/helper.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/kunstruct/kunstruct.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/kv/kv.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/factory.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/hash/hash.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/hash/namehash.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/patch/patch.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/patch/patchconflictdetector.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/validator/validators.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/printers/discard.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/printers/doc.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/printers/interface.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/printers/json.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/printers/jsonpath.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/printers/name.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/printers/sourcechecker.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/printers/tableprinter.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/printers/tabwriter.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/printers/template.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/printers/typesetter.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/resource/builder.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/resource/client.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/resource/crd_finder.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/resource/doc.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/resource/dry_run_verifier.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/resource/fake.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/resource/helper.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/resource/interfaces.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/resource/mapper.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/resource/metadata_decoder.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/resource/result.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/resource/scheme.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/resource/selector.go
 create mode 100644 vendor/k8s.io/cli-runtime/pkg/resource/visitor.go
 create mode 100644 vendor/k8s.io/client-go/discovery/cached/disk/cached_discovery.go
 create mode 100644 vendor/k8s.io/client-go/discovery/cached/disk/round_tripper.go
 create mode 100644 vendor/k8s.io/client-go/dynamic/interface.go
 create mode 100644 vendor/k8s.io/client-go/dynamic/scheme.go
 create mode 100644 vendor/k8s.io/client-go/dynamic/simple.go
 delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview_expansion.go
 delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview_expansion.go
 delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview_expansion.go
 delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview_expansion.go
 delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview_expansion.go
 delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview_expansion.go
 delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go
 delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go
 delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview_expansion.go
 delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview_expansion.go
 delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount_expansion.go
 create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingressclass.go
 create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csidriver.go
 create mode 100644 vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/metrics.go
 create mode 100644 vendor/k8s.io/client-go/restmapper/category_expansion.go
 create mode 100644 vendor/k8s.io/client-go/restmapper/discovery.go
 create mode 100644 vendor/k8s.io/client-go/restmapper/shortcut.go
 create mode 100644 vendor/k8s.io/client-go/scale/client.go
 create mode 100644 vendor/k8s.io/client-go/scale/doc.go
 create mode 100644 vendor/k8s.io/client-go/scale/interfaces.go
 create mode 100644 vendor/k8s.io/client-go/scale/scheme/appsint/doc.go
 create mode 100644 vendor/k8s.io/client-go/scale/scheme/appsint/register.go
 create mode 100644 vendor/k8s.io/client-go/scale/scheme/appsv1beta1/conversion.go
 create mode 100644 vendor/k8s.io/client-go/scale/scheme/appsv1beta1/doc.go
 create mode 100644 vendor/k8s.io/client-go/scale/scheme/appsv1beta1/register.go
 create mode 100644 vendor/k8s.io/client-go/scale/scheme/appsv1beta1/zz_generated.conversion.go
 create mode 100644 vendor/k8s.io/client-go/scale/scheme/appsv1beta2/conversion.go
 create mode 100644 vendor/k8s.io/client-go/scale/scheme/appsv1beta2/doc.go
 create mode 100644 vendor/k8s.io/client-go/scale/scheme/appsv1beta2/register.go
 create mode 100644 vendor/k8s.io/client-go/scale/scheme/appsv1beta2/zz_generated.conversion.go
 create mode 100644 vendor/k8s.io/client-go/scale/scheme/autoscalingv1/conversion.go
 create mode 100644 vendor/k8s.io/client-go/scale/scheme/autoscalingv1/doc.go
 create mode 100644 vendor/k8s.io/client-go/scale/scheme/autoscalingv1/register.go
 create mode 100644 vendor/k8s.io/client-go/scale/scheme/autoscalingv1/zz_generated.conversion.go
 create mode 100644 vendor/k8s.io/client-go/scale/scheme/doc.go
 create mode 100644 vendor/k8s.io/client-go/scale/scheme/extensionsint/doc.go
 create mode 100644 vendor/k8s.io/client-go/scale/scheme/extensionsint/register.go
 create mode 100644 vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/conversion.go
 create mode 100644 vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/doc.go
 create mode 100644 vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/register.go
 create mode 100644 vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/zz_generated.conversion.go
 create mode 100644 vendor/k8s.io/client-go/scale/scheme/register.go
 create mode 100644 vendor/k8s.io/client-go/scale/scheme/types.go
 create mode 100644 vendor/k8s.io/client-go/scale/scheme/zz_generated.deepcopy.go
 create mode 100644 vendor/k8s.io/client-go/scale/util.go
 create mode 100644 vendor/k8s.io/client-go/third_party/forked/golang/template/exec.go
 create mode 100644 vendor/k8s.io/client-go/third_party/forked/golang/template/funcs.go
 create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/doc.go
 create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/errorstream.go
 create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/reader.go
 create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go
 create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/resize.go
 create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/v1.go
 create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/v2.go
 create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/v3.go
 create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/v4.go
 create mode 100644 vendor/k8s.io/client-go/transport/cert_rotation.go
 create mode 100644 vendor/k8s.io/client-go/transport/spdy/spdy.go
 create mode 100644 vendor/k8s.io/client-go/util/exec/exec.go
 create mode 100644 vendor/k8s.io/client-go/util/jsonpath/doc.go
 create mode 100644 vendor/k8s.io/client-go/util/jsonpath/jsonpath.go
 create mode 100644 vendor/k8s.io/client-go/util/jsonpath/node.go
 create mode 100644 vendor/k8s.io/client-go/util/jsonpath/parser.go
 delete mode 100644 vendor/k8s.io/client-go/util/retry/util.go
 create mode 100644 vendor/k8s.io/component-base/LICENSE
 create mode 100644 vendor/k8s.io/component-base/version/.gitattributes
 create mode 100644 vendor/k8s.io/component-base/version/base.go
 create mode 100644 vendor/k8s.io/component-base/version/def.bzl
 create mode 100644 vendor/k8s.io/component-base/version/version.go
 create mode 100644 vendor/k8s.io/kube-openapi/pkg/common/common.go
 create mode 100644 vendor/k8s.io/kube-openapi/pkg/common/doc.go
 create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/proto/validation/errors.go
 create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/proto/validation/types.go
 create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/proto/validation/validation.go
 create mode 100644 vendor/k8s.io/kubectl/pkg/cmd/util/factory.go
 create mode 100644 vendor/k8s.io/kubectl/pkg/cmd/util/factory_client_access.go
 create mode 100644 vendor/k8s.io/kubectl/pkg/cmd/util/helpers.go
 create mode 100644 vendor/k8s.io/kubectl/pkg/cmd/util/kubectl_match_version.go
 create mode 100644 vendor/k8s.io/kubectl/pkg/cmd/util/printing.go
 create mode 100644 vendor/k8s.io/kubectl/pkg/scheme/install.go
 create mode 100644 vendor/k8s.io/kubectl/pkg/scheme/scheme.go
 create mode 100644 vendor/k8s.io/kubectl/pkg/util/interrupt/interrupt.go
 rename vendor/k8s.io/{client-go/util/retry => kubectl/pkg/util/openapi}/OWNERS (65%)
 create mode 100644 vendor/k8s.io/kubectl/pkg/util/openapi/doc.go
 create mode 100644 vendor/k8s.io/kubectl/pkg/util/openapi/extensions.go
 create mode 100644 vendor/k8s.io/kubectl/pkg/util/openapi/openapi.go
 create mode 100644 vendor/k8s.io/kubectl/pkg/util/openapi/openapi_getter.go
 create mode 100644 vendor/k8s.io/kubectl/pkg/util/openapi/validation/validation.go
 create mode 100644 vendor/k8s.io/kubectl/pkg/util/templates/command_groups.go
 create mode 100644 vendor/k8s.io/kubectl/pkg/util/templates/markdown.go
 create mode 100644 vendor/k8s.io/kubectl/pkg/util/templates/normalizers.go
 create mode 100644 vendor/k8s.io/kubectl/pkg/util/templates/templater.go
 create mode 100644 vendor/k8s.io/kubectl/pkg/util/templates/templates.go
 create mode 100644 vendor/k8s.io/kubectl/pkg/util/term/resize.go
 create mode 100644 vendor/k8s.io/kubectl/pkg/util/term/resizeevents.go
 create mode 100644 vendor/k8s.io/kubectl/pkg/util/term/resizeevents_windows.go
 create mode 100644 vendor/k8s.io/kubectl/pkg/util/term/term.go
 create mode 100644 vendor/k8s.io/kubectl/pkg/util/term/term_writer.go
 create mode 100644 vendor/k8s.io/kubectl/pkg/validation/schema.go
 create mode 100644 vendor/k8s.io/utils/exec/doc.go
 create mode 100644 vendor/k8s.io/utils/exec/exec.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/LICENSE
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/commands/build/build.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/constants/constants.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/expansion/expand.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/factory/factory.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/fs/confirmeddir.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/fs/fakefile.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/fs/fakefileinfo.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/fs/fakefs.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/fs/fs.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/fs/realfile.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/fs/realfs.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/git/cloner.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/git/repospec.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/gvk/gvk.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/ifc/ifc.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/ifc/transformer/factory.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/image/deprecatedimage.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/image/image.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/internal/error/configmaperror.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/internal/error/kustomizationerror.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/internal/error/patcherror.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/internal/error/resourceerror.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/internal/error/secreterror.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/internal/error/yamlformaterror.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/loader/fileloader.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/loader/loader.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/patch/json6902.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/patch/strategicmerge.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/patch/transformer/factory.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/patch/transformer/patchjson6902json.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/resid/resid.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/resmap/factory.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/resmap/idslice.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/resmap/resmap.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/resource/factory.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/resource/resource.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/target/kusttarget.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/target/resaccumulator.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/transformers/config/defaultconfig/commonannotations.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/transformers/config/defaultconfig/commonlabels.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/transformers/config/defaultconfig/defaultconfig.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/transformers/config/defaultconfig/nameprefix.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/transformers/config/defaultconfig/namereference.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/transformers/config/defaultconfig/namespace.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/transformers/config/defaultconfig/varreference.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/transformers/config/factory.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/transformers/config/factorycrd.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/transformers/config/fieldspec.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/transformers/config/namebackreferences.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/transformers/config/transformerconfig.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/transformers/image.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/transformers/labelsandannotations.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/transformers/multitransformer.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/transformers/mutatefield.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/transformers/namereference.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/transformers/namespace.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/transformers/nooptransformer.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/transformers/prefixsuffixname.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/transformers/refvars.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/transformers/transformer.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/types/genargs.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/types/generationbehavior.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/types/kustomization.go
 create mode 100644 vendor/sigs.k8s.io/kustomize/pkg/types/var.go
 create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/LICENSE
 create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/allocator.go
 create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/doc.go
 create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/fields.go
 create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/jsontagutil.go
 create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/list.go
 create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/listreflect.go
 create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/listunstructured.go
 create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/map.go
 create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapreflect.go
 create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapunstructured.go
 create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/reflectcache.go
 create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/scalar.go
 create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/structreflect.go
 create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/value.go
 create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/valuereflect.go
 create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/valueunstructured.go
 create mode 100644 vendor/sigs.k8s.io/yaml/go.mod
 create mode 100644 vendor/sigs.k8s.io/yaml/go.sum

diff --git a/go.mod b/go.mod
index 550574e0..e3891bbb 100644
--- a/go.mod
+++ b/go.mod
@@ -4,7 +4,7 @@ go 1.12
 
 replace (
 	github.com/knative/pkg => github.com/rancher/pkg v0.0.0-20190514055449-b30ab9de040e
-	k8s.io/client-go => k8s.io/client-go v0.17.2
+	k8s.io/client-go => k8s.io/client-go v0.18.0
 )
 
 require (
@@ -25,19 +25,19 @@ require (
 	github.com/mcuadros/go-version v0.0.0-20180611085657-6d5863ca60fa
 	github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c // indirect
 	github.com/pkg/errors v0.8.1
-	github.com/rancher/norman v0.0.0-20200312033725-5c74e1ee1e6d
-	github.com/rancher/types v0.0.0-20200326224903-b4612bd96d9b
+	github.com/rancher/norman v0.0.0-20200326201949-eb806263e8ad
+	github.com/rancher/types v0.0.0-20200326224235-0d1e1dcc8d55
 	github.com/sirupsen/logrus v1.4.2
 	github.com/stretchr/testify v1.4.0
 	github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 // indirect
 	github.com/urfave/cli v1.20.0
-	golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708
+	golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975
 	golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e
-	gopkg.in/yaml.v2 v2.2.5
-	k8s.io/api v0.17.2
-	k8s.io/apimachinery v0.17.2
-	k8s.io/apiserver v0.17.2
+	gopkg.in/yaml.v2 v2.2.8
+	k8s.io/api v0.18.0
+	k8s.io/apimachinery v0.18.0
+	k8s.io/apiserver v0.18.0
 	k8s.io/client-go v12.0.0+incompatible
-	k8s.io/kubectl v0.17.2
-	sigs.k8s.io/yaml v1.1.0
+	k8s.io/kubectl v0.18.0
+	sigs.k8s.io/yaml v1.2.0
 )
diff --git a/go.sum b/go.sum
index 3fbb4816..076c9f0b 100644
--- a/go.sum
+++ b/go.sum
@@ -49,6 +49,7 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
 github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
 github.com/Jeffail/gabs v1.1.1/go.mod h1:6xMvQMK4k33lb7GUUpaAPh6nKMmemQeg5d4gn7/bOXc=
+github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd h1:sjQovDkwrZp8u+gxLtPgKGjk5hCxuy2hrRejBTA9xFU=
 github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E=
 github.com/Masterminds/goutils v1.1.0 h1:zukEsf/1JZwCMgHiK3GZftabmxiCw4apj3a28RPBiVg=
 github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
@@ -65,8 +66,10 @@ github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAE
 github.com/OneOfOne/xxhash v1.2.6/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
 github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
 github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
 github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
 github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
 github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
 github.com/SAP/go-hdb v0.14.1/go.mod h1:7fdQLVC2lER3urZLjZCm0AuMQfApof92n3aylBPEkMo=
 github.com/SermoDigital/jose v0.9.1/go.mod h1:ARgCUhI1MHQH+ONky/PAtmVHQrP5JlGY0F3poXOp/fA=
@@ -174,6 +177,7 @@ github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5Xh
 github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
 github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
 github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg=
 github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
 github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
 github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74/go.mod h1:UqXY1lYT/ERa4OEAywUqdok1T4RCRdArkhic1Opuavo=
@@ -188,14 +192,19 @@ github.com/elastic/go-sysinfo v1.1.1/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6
 github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU=
 github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss=
 github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
+github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc=
+github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
 github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk=
 github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
 github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6vRfwrJatElLBEf0I=
 github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M=
 github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM=
 github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4=
 github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc=
 github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
@@ -237,12 +246,14 @@ github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwds
 github.com/go-openapi/jsonpointer v0.17.2/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
 github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
 github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
+github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w=
 github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
 github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
 github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
 github.com/go-openapi/jsonreference v0.17.2/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
 github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
 github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
+github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o=
 github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
 github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
 github.com/go-openapi/loads v0.17.2/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
@@ -259,6 +270,7 @@ github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsd
 github.com/go-openapi/spec v0.17.2/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
 github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
 github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY=
+github.com/go-openapi/spec v0.19.3 h1:0XRyw8kguri6Yw4SxhsQA/atC88yqrk0+G4YhI2wabc=
 github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
 github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
 github.com/go-openapi/strfmt v0.17.2/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
@@ -272,6 +284,7 @@ github.com/go-openapi/swag v0.17.2/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/
 github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
 github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
 github.com/go-openapi/swag v0.19.4/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY=
 github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
 github.com/go-openapi/validate v0.17.2/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
 github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
@@ -328,6 +341,8 @@ github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO
 github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
 github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
+github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
 github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
 github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
@@ -342,6 +357,7 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+
 github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
 github.com/googleapis/gnostic v0.0.0-20170426233943-68f4ded48ba9/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
 github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
+github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
 github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g=
 github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
 github.com/googleapis/gnostic v0.3.1 h1:WeAefnSUHlBb0iJKwxFDZdbfGwkd7xRNuV+IpXMJhYk=
@@ -361,6 +377,7 @@ github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH
 github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
 github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY=
 github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc h1:f8eY6cV/x1x+HLjOp4r72s/31/V2aTUtg5oKRRPf8/Q=
 github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
 github.com/grpc-ecosystem/go-grpc-middleware v0.0.0-20190222133341-cfaf5686ec79/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
 github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 h1:z53tR0945TRRQO/fLEVPI6SMv7ZflF0TEaTAoU7tOzg=
@@ -427,6 +444,7 @@ github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ
 github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
 github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI=
 github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
 github.com/influxdata/influxdb v1.7.7/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY=
 github.com/jefferai/jsonx v1.0.0/go.mod h1:OGmqmi2tTeI/PS+qQfBDToLHHJIy/RMp24fPo8vFvoQ=
@@ -472,6 +490,7 @@ github.com/kylelemons/godebug v0.0.0-20160406211939-eadb3ce320cb/go.mod h1:B69LE
 github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
 github.com/leanovate/gopter v0.2.4/go.mod h1:gNcbPWNEWRe4lm+bycKqxUYoH5uoVje5SkOJ3uoLer8=
 github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
 github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
 github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
 github.com/lightstep/lightstep-tracer-go v0.18.0/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
@@ -483,6 +502,7 @@ github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN
 github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM=
 github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
 github.com/markbates/inflect v1.0.4/go.mod h1:1fR9+pO2KHEO9ZRtto13gDwwZaAKstQzferVeWqbgNs=
 github.com/maruel/panicparse v0.0.0-20171209025017-c0182c169410/go.mod h1:nty42YY5QByNC5MM7q/nj938VbgPU7avs45z6NClpxI=
@@ -524,6 +544,7 @@ github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrk
 github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
 github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
 github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4=
 github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
 github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
 github.com/mitchellh/hashstructure v0.0.0-20170609045927-2bca23e0e452/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ=
@@ -564,6 +585,8 @@ github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=
 github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.10.3 h1:OoxbjfXVZyod1fmWYhI7SEyaD8B00ynP3T+D5GiyHOY=
 github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw=
+github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
 github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@@ -588,6 +611,7 @@ github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144T
 github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
 github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
 github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
 github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
 github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -634,15 +658,17 @@ github.com/prometheus/prometheus v1.8.2-0.20200107122003-4708915ac6ef/go.mod h1:
 github.com/prometheus/prometheus v2.3.2+incompatible/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s=
 github.com/rancher/norman v0.0.0-20200211155126-fc45a55d4dfd h1:96iahn2n4qq7EuJ6QNrW2iqm1xVUktzjHSyNPPm/6YU=
 github.com/rancher/norman v0.0.0-20200211155126-fc45a55d4dfd/go.mod h1:b+483H276jRBXYosdWrNKFpxH+JYMs3UIdlV60dhdg0=
-github.com/rancher/norman v0.0.0-20200312033725-5c74e1ee1e6d h1:XrZkPfanv6m1OcnqUzXqn4GmLA7vJ6eHJF7VGDl5ses=
-github.com/rancher/norman v0.0.0-20200312033725-5c74e1ee1e6d/go.mod h1:b+483H276jRBXYosdWrNKFpxH+JYMs3UIdlV60dhdg0=
+github.com/rancher/norman v0.0.0-20200326201949-eb806263e8ad h1:Ha6G8j9yfKrvJQeKmPpNzyloJ+98GuD4a3zMwASlgbs=
+github.com/rancher/norman v0.0.0-20200326201949-eb806263e8ad/go.mod h1:g72A7RbW+GCqXicpQumVjmyHz2tP9AiiuRGRm+Ewnyo=
 github.com/rancher/pkg v0.0.0-20190514055449-b30ab9de040e h1:j6+HqCET/NLPBtew2m5apL7jWw/PStQ7iGwXjgAqdvo=
 github.com/rancher/pkg v0.0.0-20190514055449-b30ab9de040e/go.mod h1:XbYHTPaXuw8ZY9bylhYKQh/nJxDaTKk3YhAxPl4Qy/k=
-github.com/rancher/types v0.0.0-20200326224903-b4612bd96d9b h1:nLJOQuk36vCXFQuD03L5Fh9xpF9n9U7/76WNGDjOjeY=
-github.com/rancher/types v0.0.0-20200326224903-b4612bd96d9b/go.mod h1:k5LoTlUpefw0eAzFSJsZI0gf+C4WE41yrc1jm/MS1nM=
+github.com/rancher/types v0.0.0-20200326224235-0d1e1dcc8d55 h1:0F1WNqBBZKIQAUKEjqSw1xGVwdyBoAy0SIJVVkWkKGI=
+github.com/rancher/types v0.0.0-20200326224235-0d1e1dcc8d55/go.mod h1:k5LoTlUpefw0eAzFSJsZI0gf+C4WE41yrc1jm/MS1nM=
 github.com/rancher/wrangler v0.4.1/go.mod h1:1cR91WLhZgkZ+U4fV9nVuXqKurWbgXcIReU4wnQvTN8=
 github.com/rancher/wrangler v0.5.0 h1:zTchAfY9DzchLvXpRpQuNB0PbNfl/HSuvFL1wHN6mDU=
 github.com/rancher/wrangler v0.5.0/go.mod h1:txHSBkPtVgNH/0pUCvdP0Ak0HptAOc9ffBmFxQnL4z4=
+github.com/rancher/wrangler v0.5.4-0.20200326191509-4054411d9736 h1:hqpVLgNUxU5sQUV6SzJPMY8Fy7T9Qht2QkA2Q7O/SH0=
+github.com/rancher/wrangler v0.5.4-0.20200326191509-4054411d9736/go.mod h1:L4HtjPeX8iqLgsxfJgz+JjKMcX2q3qbRXSeTlC/CSd4=
 github.com/rancher/wrangler-api v0.5.0/go.mod h1:Ne7fjNRBDdUYPqltLUCW8eiaQwuKXIyAJH6wsuGK80w=
 github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
 github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M=
@@ -650,6 +676,7 @@ github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6So
 github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
+github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
 github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
 github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
 github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
@@ -686,6 +713,7 @@ github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTd
 github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
 github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
 github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=
 github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
 github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
 github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
@@ -770,6 +798,8 @@ golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8U
 golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
 golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708 h1:pXVtWnwHkrWD9ru3sDxY/qFK/bfc0egRovX91EjWjf4=
 golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo=
+golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -869,6 +899,7 @@ golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191113165036-4c7a9d0fe056/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -973,6 +1004,8 @@ google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac
 google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
 google.golang.org/grpc v1.25.1 h1:wdKvqQk7IttEw92GoRyKG2IDrUIpgpj6H6m81yfeMW0=
 google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg=
+google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -1005,6 +1038,8 @@ gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c=
 gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v3 v3.0.0-20190905181640-827449938966/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
 gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
@@ -1024,11 +1059,14 @@ k8s.io/api v0.0.0-20191115095533-47f6de673b26/go.mod h1:iA/8arsvelvo4IDqIhX4IbjT
 k8s.io/api v0.17.0/go.mod h1:npsyOePkeP0CPwyGfXDHxvypiYMJxBWAMpQxCaJ4ZxI=
 k8s.io/api v0.17.2 h1:NF1UFXcKN7/OOv1uxdRz3qfra8AHsPav5M93hlV9+Dc=
 k8s.io/api v0.17.2/go.mod h1:BS9fjjLc4CMuqfSO8vgbHPKMt5+SF0ET6u/RVDihTo4=
+k8s.io/api v0.18.0 h1:lwYk8Vt7rsVTwjRU6pzEsa9YNhThbmbocQlKvNBB4EQ=
+k8s.io/api v0.18.0/go.mod h1:q2HRQkfDzHMBZL9l/y9rH63PkQl4vae0xRT+8prbrK8=
 k8s.io/apiextensions-apiserver v0.0.0-20190918161926-8f644eb6e783/go.mod h1:xvae1SZB3E17UpV59AWc271W/Ph25N+bjPyR63X6tPY=
 k8s.io/apiextensions-apiserver v0.0.0-20191016113550-5357c4baaf65/go.mod h1:5BINdGqggRXXKnDgpwoJ7PyQH8f+Ypp02fvVNcIFy9s=
 k8s.io/apiextensions-apiserver v0.0.0-20191114105449-027877536833/go.mod h1:Gb1G2W/kXMizbVTnA9oh2ybQ4cM3COr3r5JDj+DzKGw=
 k8s.io/apiextensions-apiserver v0.17.0/go.mod h1:XiIFUakZywkUl54fVXa7QTEHcqQz9HG55nHd1DCoHj8=
 k8s.io/apiextensions-apiserver v0.17.2/go.mod h1:4KdMpjkEjjDI2pPfBA15OscyNldHWdBCfsWMDWAmSTs=
+k8s.io/apiextensions-apiserver v0.18.0/go.mod h1:18Cwn1Xws4xnWQNC00FLq1E350b9lUF+aOdIWDOZxgo=
 k8s.io/apimachinery v0.0.0-20181127025237-2b1284ed4c93/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0=
 k8s.io/apimachinery v0.0.0-20190809020650-423f5d784010/go.mod h1:Waf/xTS2FGRrgXCkO5FP3XxTOWh0qLf2QhL1qFZZ/R8=
 k8s.io/apimachinery v0.0.0-20190913080033-27d36303b655/go.mod h1:nL6pwRT8NgfF8TT68DBI8uEePRt89cSvoXUVqbkWHq4=
@@ -1038,30 +1076,40 @@ k8s.io/apimachinery v0.0.0-20191115015347-3c7067801da2/go.mod h1:dXFS2zaQR8fyzuv
 k8s.io/apimachinery v0.17.0/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg=
 k8s.io/apimachinery v0.17.2 h1:hwDQQFbdRlpnnsR64Asdi55GyCaIP/3WQpMmbNBeWr4=
 k8s.io/apimachinery v0.17.2/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg=
+k8s.io/apimachinery v0.18.0 h1:fuPfYpk3cs1Okp/515pAf0dNhL66+8zk8RLbSX+EgAE=
+k8s.io/apimachinery v0.18.0/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA=
 k8s.io/apiserver v0.0.0-20190918160949-bfa5e2e684ad/go.mod h1:XPCXEwhjaFN29a8NldXA901ElnKeKLrLtREO9ZhFyhg=
 k8s.io/apiserver v0.0.0-20191016112112-5190913f932d/go.mod h1:7OqfAolfWxUM/jJ/HBLyE+cdaWFBUoo5Q5pHgJVj2ws=
 k8s.io/apiserver v0.0.0-20191114103151-9ca1dc586682/go.mod h1:Idob8Va6/sMX5SmwPLsU0pdvFlkwxuJ5x+fXMG8NbKE=
 k8s.io/apiserver v0.17.0/go.mod h1:ABM+9x/prjINN6iiffRVNCBR2Wk7uY4z+EtEGZD48cg=
 k8s.io/apiserver v0.17.2 h1:NssVvPALll6SSeNgo1Wk1h2myU1UHNwmhxV0Oxbcl8Y=
 k8s.io/apiserver v0.17.2/go.mod h1:lBmw/TtQdtxvrTk0e2cgtOxHizXI+d0mmGQURIHQZlo=
-k8s.io/cli-runtime v0.17.2/go.mod h1:aa8t9ziyQdbkuizkNLAw3qe3srSyWh9zlSB7zTqRNPI=
-k8s.io/client-go v0.17.2 h1:ndIfkfXEGrNhLIgkr0+qhRguSD3u6DCmonepn1O6NYc=
-k8s.io/client-go v0.17.2/go.mod h1:QAzRgsa0C2xl4/eVpeVAZMvikCn8Nm81yqVx3Kk9XYI=
+k8s.io/apiserver v0.18.0 h1:ELAWpGWC6XdbRLi5lwAbEbvksD7hkXxPdxaJsdpist4=
+k8s.io/apiserver v0.18.0/go.mod h1:3S2O6FeBBd6XTo0njUrLxiqk8GNy6wWOftjhJcXYnjw=
+k8s.io/cli-runtime v0.18.0 h1:jG8XpSqQ5TrV0N+EZ3PFz6+gqlCk71dkggWCCq9Mq34=
+k8s.io/cli-runtime v0.18.0/go.mod h1:1eXfmBsIJosjn9LjEBUd2WVPoPAY9XGTqTFcPMIBsUQ=
+k8s.io/client-go v0.18.0 h1:yqKw4cTUQraZK3fcVCMeSa+lqKwcjZ5wtcOIPnxQno4=
+k8s.io/client-go v0.18.0/go.mod h1:uQSYDYs4WhVZ9i6AIoEZuwUggLVEF64HOD37boKAtF8=
 k8s.io/code-generator v0.0.0-20181114232248-ae218e241252/go.mod h1:IPqxl/YHk05nodzupwjke6ctMjyNRdV2zZ5/j3/F204=
 k8s.io/code-generator v0.0.0-20190912054826-cd179ad6a269/go.mod h1:V5BD6M4CyaN5m+VthcclXWsVcT1Hu+glwa1bi3MIsyE=
 k8s.io/code-generator v0.0.0-20191004115455-8e001e5d1894/go.mod h1:mJUgkl06XV4kstAnLHAIzJPVCOzVR+ZcfPIv4fUsFCY=
 k8s.io/code-generator v0.17.0/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s=
 k8s.io/code-generator v0.17.2/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s=
+k8s.io/code-generator v0.18.0/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc=
 k8s.io/component-base v0.0.0-20190918160511-547f6c5d7090/go.mod h1:933PBGtQFJky3TEwYx4aEPZ4IxqhWh3R6DCmzqIn1hA=
 k8s.io/component-base v0.0.0-20191016111319-039242c015a9/go.mod h1:SuWowIgd/dtU/m/iv8OD9eOxp3QZBBhTIiWMsBQvKjI=
 k8s.io/component-base v0.0.0-20191114102325-35a9586014f7/go.mod h1:9rNMvrwbqPF4MxI+VQYETrWqMKxi8yAd8YZLdSJ9EDw=
 k8s.io/component-base v0.17.0/go.mod h1:rKuRAokNMY2nn2A6LP/MiwpoaMRHpfRnrPaUJJj1Yoc=
 k8s.io/component-base v0.17.2/go.mod h1:zMPW3g5aH7cHJpKYQ/ZsGMcgbsA/VyhEugF3QT1awLs=
+k8s.io/component-base v0.18.0 h1:I+lP0fNfsEdTDpHaL61bCAqTZLoiWjEEP304Mo5ZQgE=
+k8s.io/component-base v0.18.0/go.mod h1:u3BCg0z1uskkzrnAKFzulmYaEpZF7XC9Pf/uFyb1v2c=
 k8s.io/gengo v0.0.0-20181106084056-51747d6e00da/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
 k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
 k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
 k8s.io/gengo v0.0.0-20191120174120-e74f70b9b27e h1:HqlU9dKk5YVs7R84jmq6U3Wo/XslpkxHpBv2iWHLtLc=
 k8s.io/gengo v0.0.0-20191120174120-e74f70b9b27e/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/gengo v0.0.0-20200114144118-36b2048a9120 h1:RPscN6KhmG54S33L+lr3GS+oD1jmchIU0ll519K6FA4=
+k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
 k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
 k8s.io/klog v0.0.0-20190306015804-8e90cee79f82/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
 k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
@@ -1079,14 +1127,18 @@ k8s.io/kube-openapi v0.0.0-20190709113604-33be087ad058/go.mod h1:nfDlWeOsu3pUf4y
 k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
 k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a h1:UcxjrRMyNx/i/y8G7kPvLyy7rfbeuf1PYyBf973pgyU=
 k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
-k8s.io/kubectl v0.17.2 h1:QZR8Q6lWiVRjwKslekdbN5WPMp53dS/17j5e+oi5XVU=
-k8s.io/kubectl v0.17.2/go.mod h1:y4rfLV0n6aPmvbRCqZQjvOp3ezxsFgpqL+zF5jH/lxk=
-k8s.io/metrics v0.17.2/go.mod h1:3TkNHET4ROd+NfzNxkjoVfQ0Ob4iZnaHmSEA4vYpwLw=
+k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c h1:/KUFqjjqAcY4Us6luF5RDNZ16KJtb49HfR3ZHB9qYXM=
+k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
+k8s.io/kubectl v0.18.0 h1:hu52Ndq/d099YW+3sS3VARxFz61Wheiq8K9S7oa82Dk=
+k8s.io/kubectl v0.18.0/go.mod h1:LOkWx9Z5DXMEg5KtOjHhRiC1fqJPLyCr3KtQgEolCkU=
+k8s.io/metrics v0.18.0/go.mod h1:8aYTW18koXqjLVKL7Ds05RPMX9ipJZI3mywYvBOxXd4=
 k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
 k8s.io/utils v0.0.0-20191114184206-e782cd3c129f h1:GiPwtSzdP43eI1hpPCbROQCCIgCuiMMNF8YUVLF3vJo=
 k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
 k8s.io/utils v0.0.0-20191114200735-6ca3b61696b6 h1:p0Ai3qVtkbCG/Af26dBmU0E1W58NID3hSSh7cMyylpM=
 k8s.io/utils v0.0.0-20191114200735-6ca3b61696b6/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
+k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 h1:d4vVOjXm687F1iLSP2q3lyPPuyvTUt3aVoBpi2DqRsU=
+k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
 knative.dev/pkg v0.0.0-20191024223035-2a3fc371d326/go.mod h1:pgODObA1dTyhNoFxPZTTjNWfx6F0aKsKzn+vaT9XO/Q=
 launchpad.net/gocheck v0.0.0-20140225173054-000000000087/go.mod h1:hj7XX3B/0A+80Vse0e+BUHsHMTEhd0O4cpUHr/e/BUM=
 modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
@@ -1095,16 +1147,24 @@ modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03
 modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=
 modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I=
 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0=
 sigs.k8s.io/controller-runtime v0.3.1-0.20191022174215-ad57a976ffa1/go.mod h1:p2vzQ3RuSVv9YR4AcM0y8TKHQA+0oLXazKFt6Z0OdS8=
 sigs.k8s.io/controller-tools v0.2.2/go.mod h1:8SNGuj163x/sMwydREj7ld5mIMJu1cDanIfnx6xsU70=
 sigs.k8s.io/controller-tools v0.2.4/go.mod h1:m/ztfQNocGYBgTTCmFdnK94uVvgxeZeE3LtJvd/jIzA=
+sigs.k8s.io/kustomize v2.0.3+incompatible h1:JUufWFNlI44MdtnjUqVnvh29rR37PQFzPbLXqhyOyX0=
 sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU=
 sigs.k8s.io/structured-merge-diff v0.0.0-20190426204423-ea680f03cc65/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
 sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
 sigs.k8s.io/structured-merge-diff v0.0.0-20190817042607-6149e4549fca/go.mod h1:IIgPezJWb76P0hotTxzDbWsMYB8APh18qZnxkomBpxA=
+sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06 h1:zD2IemQ4LmOcAumeiyDWXKUI2SO0NYDe3H6QGvPOVgU=
 sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1:/ULNhyfzRopfcjskuui0cTITekDduZ7ycKN3oUT9R18=
+sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
+sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E=
+sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
 sigs.k8s.io/testing_frameworks v0.1.1/go.mod h1:VVBKrHmJ6Ekkfz284YKhQePcdycOzNH9qL6ht1zEr/U=
 sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
 sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
+sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
+sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
 software.sslmate.com/src/go-pkcs12 v0.0.0-20180114231543-2291e8f0f237/go.mod h1:/xvNRWUqm0+/ZMiF4EX00vrSCMsE4/NHb+Pt3freEeQ=
 vbom.ml/util v0.0.0-20160121211510-db5cfe13f5cc/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI=
diff --git a/vendor/github.com/Azure/go-ansiterm/LICENSE b/vendor/github.com/Azure/go-ansiterm/LICENSE
new file mode 100644
index 00000000..e3d9a64d
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Microsoft Corporation
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/Azure/go-ansiterm/README.md b/vendor/github.com/Azure/go-ansiterm/README.md
new file mode 100644
index 00000000..261c041e
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/README.md
@@ -0,0 +1,12 @@
+# go-ansiterm
+
+This is a cross-platform ANSI terminal emulation library. It reads a stream of ANSI characters and produces the appropriate function calls. The results of the function calls are platform dependent.
+
+For example, the parser might receive "ESC, [, A" as a stream of three characters. This is the code for Cursor Up (http://www.vt100.net/docs/vt510-rm/CUU). The parser then calls the cursor up function (CUU()) on an event handler. The event handler determines what platform-specific work must be done to cause the cursor to move up one position.
+
+The parser (parser.go) is a partial implementation of this state machine (http://vt100.net/emu/vt500_parser.png). There are also two event handler implementations: one for tests (test_event_handler.go), which validates that the expected events are being produced and called, and a Windows implementation (winterm/win_event_handler.go).
+
+See parser_test.go for examples exercising the state machine and generating appropriate function calls.
+
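+A minimal usage sketch follows (the `newMyHandler` constructor is assumed here for illustration; any type implementing `AnsiEventHandler` from event_handler.go works the same way):
+
+```go
+package main
+
+import (
+	ansiterm "github.com/Azure/go-ansiterm"
+)
+
+func main() {
+	// newMyHandler is an assumed constructor for a type implementing ansiterm.AnsiEventHandler.
+	var handler ansiterm.AnsiEventHandler = newMyHandler()
+
+	// Start the state machine in the Ground state.
+	parser := ansiterm.CreateParser("Ground", handler)
+
+	// "ESC [ A" is Cursor Up; the parser translates it into a CUU(1) call on the handler.
+	if _, err := parser.Parse([]byte("\x1b[A")); err != nil {
+		panic(err)
+	}
+}
+```
+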
+-----
+This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
diff --git a/vendor/github.com/Azure/go-ansiterm/constants.go b/vendor/github.com/Azure/go-ansiterm/constants.go
new file mode 100644
index 00000000..96504a33
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/constants.go
@@ -0,0 +1,188 @@
+package ansiterm
+
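+// LogEnv names the environment variable that, when set to "1", enables debug
+// logging to ansiParser.log (see CreateParser in parser.go).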
+const LogEnv = "DEBUG_TERMINAL"
+
+// ANSI constants
+// References:
+// -- http://www.ecma-international.org/publications/standards/Ecma-048.htm
+// -- http://man7.org/linux/man-pages/man4/console_codes.4.html
+// -- http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html
+// -- http://en.wikipedia.org/wiki/ANSI_escape_code
+// -- http://vt100.net/emu/dec_ansi_parser
+// -- http://vt100.net/emu/vt500_parser.svg
+// -- http://invisible-island.net/xterm/ctlseqs/ctlseqs.html
+// -- http://www.inwap.com/pdp10/ansicode.txt
+const (
+	// ECMA-48 Set Graphics Rendition
+	// Note:
+	// -- Constants leading with an underscore (e.g., _ANSI_xxx) are unsupported or reserved
+	// -- Fonts could possibly be supported via SetCurrentConsoleFontEx
+	// -- Windows does not expose the per-window cursor (i.e., caret) blink times
+	ANSI_SGR_RESET              = 0
+	ANSI_SGR_BOLD               = 1
+	ANSI_SGR_DIM                = 2
+	_ANSI_SGR_ITALIC            = 3
+	ANSI_SGR_UNDERLINE          = 4
+	_ANSI_SGR_BLINKSLOW         = 5
+	_ANSI_SGR_BLINKFAST         = 6
+	ANSI_SGR_REVERSE            = 7
+	_ANSI_SGR_INVISIBLE         = 8
+	_ANSI_SGR_LINETHROUGH       = 9
+	_ANSI_SGR_FONT_00           = 10
+	_ANSI_SGR_FONT_01           = 11
+	_ANSI_SGR_FONT_02           = 12
+	_ANSI_SGR_FONT_03           = 13
+	_ANSI_SGR_FONT_04           = 14
+	_ANSI_SGR_FONT_05           = 15
+	_ANSI_SGR_FONT_06           = 16
+	_ANSI_SGR_FONT_07           = 17
+	_ANSI_SGR_FONT_08           = 18
+	_ANSI_SGR_FONT_09           = 19
+	_ANSI_SGR_FONT_10           = 20
+	_ANSI_SGR_DOUBLEUNDERLINE   = 21
+	ANSI_SGR_BOLD_DIM_OFF       = 22
+	_ANSI_SGR_ITALIC_OFF        = 23
+	ANSI_SGR_UNDERLINE_OFF      = 24
+	_ANSI_SGR_BLINK_OFF         = 25
+	_ANSI_SGR_RESERVED_00       = 26
+	ANSI_SGR_REVERSE_OFF        = 27
+	_ANSI_SGR_INVISIBLE_OFF     = 28
+	_ANSI_SGR_LINETHROUGH_OFF   = 29
+	ANSI_SGR_FOREGROUND_BLACK   = 30
+	ANSI_SGR_FOREGROUND_RED     = 31
+	ANSI_SGR_FOREGROUND_GREEN   = 32
+	ANSI_SGR_FOREGROUND_YELLOW  = 33
+	ANSI_SGR_FOREGROUND_BLUE    = 34
+	ANSI_SGR_FOREGROUND_MAGENTA = 35
+	ANSI_SGR_FOREGROUND_CYAN    = 36
+	ANSI_SGR_FOREGROUND_WHITE   = 37
+	_ANSI_SGR_RESERVED_01       = 38
+	ANSI_SGR_FOREGROUND_DEFAULT = 39
+	ANSI_SGR_BACKGROUND_BLACK   = 40
+	ANSI_SGR_BACKGROUND_RED     = 41
+	ANSI_SGR_BACKGROUND_GREEN   = 42
+	ANSI_SGR_BACKGROUND_YELLOW  = 43
+	ANSI_SGR_BACKGROUND_BLUE    = 44
+	ANSI_SGR_BACKGROUND_MAGENTA = 45
+	ANSI_SGR_BACKGROUND_CYAN    = 46
+	ANSI_SGR_BACKGROUND_WHITE   = 47
+	_ANSI_SGR_RESERVED_02       = 48
+	ANSI_SGR_BACKGROUND_DEFAULT = 49
+	// 50 - 65: Unsupported
+
+	ANSI_MAX_CMD_LENGTH = 4096
+
+	MAX_INPUT_EVENTS = 128
+	DEFAULT_WIDTH    = 80
+	DEFAULT_HEIGHT   = 24
+
+	ANSI_BEL              = 0x07
+	ANSI_BACKSPACE        = 0x08
+	ANSI_TAB              = 0x09
+	ANSI_LINE_FEED        = 0x0A
+	ANSI_VERTICAL_TAB     = 0x0B
+	ANSI_FORM_FEED        = 0x0C
+	ANSI_CARRIAGE_RETURN  = 0x0D
+	ANSI_ESCAPE_PRIMARY   = 0x1B
+	ANSI_ESCAPE_SECONDARY = 0x5B
+	ANSI_OSC_STRING_ENTRY = 0x5D
+	ANSI_COMMAND_FIRST    = 0x40
+	ANSI_COMMAND_LAST     = 0x7E
+	DCS_ENTRY             = 0x90
+	CSI_ENTRY             = 0x9B
+	OSC_STRING            = 0x9D
+	ANSI_PARAMETER_SEP    = ";"
+	ANSI_CMD_G0           = '('
+	ANSI_CMD_G1           = ')'
+	ANSI_CMD_G2           = '*'
+	ANSI_CMD_G3           = '+'
+	ANSI_CMD_DECPNM       = '>'
+	ANSI_CMD_DECPAM       = '='
+	ANSI_CMD_OSC          = ']'
+	ANSI_CMD_STR_TERM     = '\\'
+
+	KEY_CONTROL_PARAM_2 = ";2"
+	KEY_CONTROL_PARAM_3 = ";3"
+	KEY_CONTROL_PARAM_4 = ";4"
+	KEY_CONTROL_PARAM_5 = ";5"
+	KEY_CONTROL_PARAM_6 = ";6"
+	KEY_CONTROL_PARAM_7 = ";7"
+	KEY_CONTROL_PARAM_8 = ";8"
+	KEY_ESC_CSI         = "\x1B["
+	KEY_ESC_N           = "\x1BN"
+	KEY_ESC_O           = "\x1BO"
+
+	FILL_CHARACTER = ' '
+)
+
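+// getByteRange returns the inclusive range of bytes from start through end;
+// for example, getByteRange(0x30, 0x32) yields {0x30, 0x31, 0x32}.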
+func getByteRange(start byte, end byte) []byte {
+	bytes := make([]byte, 0, 32)
+	for i := start; i <= end; i++ {
+		bytes = append(bytes, byte(i))
+	}
+
+	return bytes
+}
+
+var toGroundBytes = getToGroundBytes()
+var executors = getExecuteBytes()
+
+// SPACE		  20+A0 hex  Always and everywhere a blank space
+// Intermediate	  20-2F hex   !"#$%&'()*+,-./
+var intermeds = getByteRange(0x20, 0x2F)
+
+// Parameters	  30-3F hex  0123456789:;<=>?
+// CSI Parameters 30-39, 3B hex 0123456789;
+var csiParams = getByteRange(0x30, 0x3F)
+
+var csiCollectables = append(getByteRange(0x30, 0x39), getByteRange(0x3B, 0x3F)...)
+
+// Uppercase	  40-5F hex  @ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_
+var upperCase = getByteRange(0x40, 0x5F)
+
+// Lowercase	  60-7E hex  `abcdefghijklmnopqrstuvwxyz{|}~
+var lowerCase = getByteRange(0x60, 0x7E)
+
+// Alphabetics	  40-7E hex  (all of upper and lower case)
+var alphabetics = append(upperCase, lowerCase...)
+
+var printables = getByteRange(0x20, 0x7F)
+
+var escapeIntermediateToGroundBytes = getByteRange(0x30, 0x7E)
+var escapeToGroundBytes = getEscapeToGroundBytes()
+
+// See http://www.vt100.net/emu/vt500_parser.png for description of the complex
+// byte ranges below
+
+func getEscapeToGroundBytes() []byte {
+	escapeToGroundBytes := getByteRange(0x30, 0x4F)
+	escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x51, 0x57)...)
+	escapeToGroundBytes = append(escapeToGroundBytes, 0x59)
+	escapeToGroundBytes = append(escapeToGroundBytes, 0x5A)
+	escapeToGroundBytes = append(escapeToGroundBytes, 0x5C)
+	escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x60, 0x7E)...)
+	return escapeToGroundBytes
+}
+
+func getExecuteBytes() []byte {
+	executeBytes := getByteRange(0x00, 0x17)
+	executeBytes = append(executeBytes, 0x19)
+	executeBytes = append(executeBytes, getByteRange(0x1C, 0x1F)...)
+	return executeBytes
+}
+
+func getToGroundBytes() []byte {
+	groundBytes := []byte{0x18}
+	groundBytes = append(groundBytes, 0x1A)
+	groundBytes = append(groundBytes, getByteRange(0x80, 0x8F)...)
+	groundBytes = append(groundBytes, getByteRange(0x91, 0x97)...)
+	groundBytes = append(groundBytes, 0x99)
+	groundBytes = append(groundBytes, 0x9A)
+	groundBytes = append(groundBytes, 0x9C)
+	return groundBytes
+}
+
+// Delete		     7F hex  Always and everywhere ignored
+// C1 Control	  80-9F hex  32 additional control characters
+// G1 Displayable A1-FE hex  94 additional displayable characters
+// Special		  A0+FF hex  Same as SPACE and DELETE
diff --git a/vendor/github.com/Azure/go-ansiterm/context.go b/vendor/github.com/Azure/go-ansiterm/context.go
new file mode 100644
index 00000000..8d66e777
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/context.go
@@ -0,0 +1,7 @@
+package ansiterm
+
+type ansiContext struct {
+	currentChar byte
+	paramBuffer []byte
+	interBuffer []byte
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go b/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go
new file mode 100644
index 00000000..bcbe00d0
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go
@@ -0,0 +1,49 @@
+package ansiterm
+
+type csiEntryState struct {
+	baseState
+}
+
+func (csiState csiEntryState) Handle(b byte) (s state, e error) {
+	csiState.parser.logf("CsiEntry::Handle %#x", b)
+
+	nextState, err := csiState.baseState.Handle(b)
+	if nextState != nil || err != nil {
+		return nextState, err
+	}
+
+	switch {
+	case sliceContains(alphabetics, b):
+		return csiState.parser.ground, nil
+	case sliceContains(csiCollectables, b):
+		return csiState.parser.csiParam, nil
+	case sliceContains(executors, b):
+		return csiState, csiState.parser.execute()
+	}
+
+	return csiState, nil
+}
+
+func (csiState csiEntryState) Transition(s state) error {
+	csiState.parser.logf("CsiEntry::Transition %s --> %s", csiState.Name(), s.Name())
+	csiState.baseState.Transition(s)
+
+	switch s {
+	case csiState.parser.ground:
+		return csiState.parser.csiDispatch()
+	case csiState.parser.csiParam:
+		switch {
+		case sliceContains(csiParams, csiState.parser.context.currentChar):
+			csiState.parser.collectParam()
+		case sliceContains(intermeds, csiState.parser.context.currentChar):
+			csiState.parser.collectInter()
+		}
+	}
+
+	return nil
+}
+
+func (csiState csiEntryState) Enter() error {
+	csiState.parser.clear()
+	return nil
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/csi_param_state.go b/vendor/github.com/Azure/go-ansiterm/csi_param_state.go
new file mode 100644
index 00000000..7ed5e01c
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/csi_param_state.go
@@ -0,0 +1,38 @@
+package ansiterm
+
+type csiParamState struct {
+	baseState
+}
+
+func (csiState csiParamState) Handle(b byte) (s state, e error) {
+	csiState.parser.logf("CsiParam::Handle %#x", b)
+
+	nextState, err := csiState.baseState.Handle(b)
+	if nextState != nil || err != nil {
+		return nextState, err
+	}
+
+	switch {
+	case sliceContains(alphabetics, b):
+		return csiState.parser.ground, nil
+	case sliceContains(csiCollectables, b):
+		csiState.parser.collectParam()
+		return csiState, nil
+	case sliceContains(executors, b):
+		return csiState, csiState.parser.execute()
+	}
+
+	return csiState, nil
+}
+
+func (csiState csiParamState) Transition(s state) error {
+	csiState.parser.logf("CsiParam::Transition %s --> %s", csiState.Name(), s.Name())
+	csiState.baseState.Transition(s)
+
+	switch s {
+	case csiState.parser.ground:
+		return csiState.parser.csiDispatch()
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go b/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go
new file mode 100644
index 00000000..1c719db9
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go
@@ -0,0 +1,36 @@
+package ansiterm
+
+type escapeIntermediateState struct {
+	baseState
+}
+
+func (escState escapeIntermediateState) Handle(b byte) (s state, e error) {
+	escState.parser.logf("escapeIntermediateState::Handle %#x", b)
+	nextState, err := escState.baseState.Handle(b)
+	if nextState != nil || err != nil {
+		return nextState, err
+	}
+
+	switch {
+	case sliceContains(intermeds, b):
+		return escState, escState.parser.collectInter()
+	case sliceContains(executors, b):
+		return escState, escState.parser.execute()
+	case sliceContains(escapeIntermediateToGroundBytes, b):
+		return escState.parser.ground, nil
+	}
+
+	return escState, nil
+}
+
+func (escState escapeIntermediateState) Transition(s state) error {
+	escState.parser.logf("escapeIntermediateState::Transition %s --> %s", escState.Name(), s.Name())
+	escState.baseState.Transition(s)
+
+	switch s {
+	case escState.parser.ground:
+		return escState.parser.escDispatch()
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/escape_state.go b/vendor/github.com/Azure/go-ansiterm/escape_state.go
new file mode 100644
index 00000000..6390abd2
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/escape_state.go
@@ -0,0 +1,47 @@
+package ansiterm
+
+type escapeState struct {
+	baseState
+}
+
+func (escState escapeState) Handle(b byte) (s state, e error) {
+	escState.parser.logf("escapeState::Handle %#x", b)
+	nextState, err := escState.baseState.Handle(b)
+	if nextState != nil || err != nil {
+		return nextState, err
+	}
+
+	switch {
+	case b == ANSI_ESCAPE_SECONDARY:
+		return escState.parser.csiEntry, nil
+	case b == ANSI_OSC_STRING_ENTRY:
+		return escState.parser.oscString, nil
+	case sliceContains(executors, b):
+		return escState, escState.parser.execute()
+	case sliceContains(escapeToGroundBytes, b):
+		return escState.parser.ground, nil
+	case sliceContains(intermeds, b):
+		return escState.parser.escapeIntermediate, nil
+	}
+
+	return escState, nil
+}
+
+func (escState escapeState) Transition(s state) error {
+	escState.parser.logf("Escape::Transition %s --> %s", escState.Name(), s.Name())
+	escState.baseState.Transition(s)
+
+	switch s {
+	case escState.parser.ground:
+		return escState.parser.escDispatch()
+	case escState.parser.escapeIntermediate:
+		return escState.parser.collectInter()
+	}
+
+	return nil
+}
+
+func (escState escapeState) Enter() error {
+	escState.parser.clear()
+	return nil
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/event_handler.go b/vendor/github.com/Azure/go-ansiterm/event_handler.go
new file mode 100644
index 00000000..98087b38
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/event_handler.go
@@ -0,0 +1,90 @@
+package ansiterm
+
+type AnsiEventHandler interface {
+	// Print
+	Print(b byte) error
+
+	// Execute C0 commands
+	Execute(b byte) error
+
+	// CUrsor Up
+	CUU(int) error
+
+	// CUrsor Down
+	CUD(int) error
+
+	// CUrsor Forward
+	CUF(int) error
+
+	// CUrsor Backward
+	CUB(int) error
+
+	// Cursor to Next Line
+	CNL(int) error
+
+	// Cursor to Previous Line
+	CPL(int) error
+
+	// Cursor Horizontal position Absolute
+	CHA(int) error
+
+	// Vertical line Position Absolute
+	VPA(int) error
+
+	// CUrsor Position
+	CUP(int, int) error
+
+	// Horizontal and Vertical Position (depends on PUM)
+	HVP(int, int) error
+
+	// Text Cursor Enable Mode
+	DECTCEM(bool) error
+
+	// Origin Mode
+	DECOM(bool) error
+
+	// 132 Column Mode
+	DECCOLM(bool) error
+
+	// Erase in Display
+	ED(int) error
+
+	// Erase in Line
+	EL(int) error
+
+	// Insert Line
+	IL(int) error
+
+	// Delete Line
+	DL(int) error
+
+	// Insert Character
+	ICH(int) error
+
+	// Delete Character
+	DCH(int) error
+
+	// Set Graphics Rendition
+	SGR([]int) error
+
+	// Pan Down
+	SU(int) error
+
+	// Pan Up
+	SD(int) error
+
+	// Device Attributes
+	DA([]string) error
+
+	// Set Top and Bottom Margins
+	DECSTBM(int, int) error
+
+	// Index
+	IND() error
+
+	// Reverse Index
+	RI() error
+
+	// Flush updates from previous commands
+	Flush() error
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/ground_state.go b/vendor/github.com/Azure/go-ansiterm/ground_state.go
new file mode 100644
index 00000000..52451e94
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/ground_state.go
@@ -0,0 +1,24 @@
+package ansiterm
+
+type groundState struct {
+	baseState
+}
+
+func (gs groundState) Handle(b byte) (s state, e error) {
+	gs.parser.context.currentChar = b
+
+	nextState, err := gs.baseState.Handle(b)
+	if nextState != nil || err != nil {
+		return nextState, err
+	}
+
+	switch {
+	case sliceContains(printables, b):
+		return gs, gs.parser.print()
+
+	case sliceContains(executors, b):
+		return gs, gs.parser.execute()
+	}
+
+	return gs, nil
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/osc_string_state.go b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go
new file mode 100644
index 00000000..593b10ab
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go
@@ -0,0 +1,31 @@
+package ansiterm
+
+type oscStringState struct {
+	baseState
+}
+
+func (oscState oscStringState) Handle(b byte) (s state, e error) {
+	oscState.parser.logf("OscString::Handle %#x", b)
+	nextState, err := oscState.baseState.Handle(b)
+	if nextState != nil || err != nil {
+		return nextState, err
+	}
+
+	switch {
+	case isOscStringTerminator(b):
+		return oscState.parser.ground, nil
+	}
+
+	return oscState, nil
+}
+
+// See below for OSC string terminators for linux
+// http://man7.org/linux/man-pages/man4/console_codes.4.html
+func isOscStringTerminator(b byte) bool {
+
+	if b == ANSI_BEL || b == 0x5C {
+		return true
+	}
+
+	return false
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/parser.go b/vendor/github.com/Azure/go-ansiterm/parser.go
new file mode 100644
index 00000000..03cec7ad
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/parser.go
@@ -0,0 +1,151 @@
+package ansiterm
+
+import (
+	"errors"
+	"log"
+	"os"
+)
+
+type AnsiParser struct {
+	currState          state
+	eventHandler       AnsiEventHandler
+	context            *ansiContext
+	csiEntry           state
+	csiParam           state
+	dcsEntry           state
+	escape             state
+	escapeIntermediate state
+	error              state
+	ground             state
+	oscString          state
+	stateMap           []state
+
+	logf func(string, ...interface{})
+}
+
+type Option func(*AnsiParser)
+
+func WithLogf(f func(string, ...interface{})) Option {
+	return func(ap *AnsiParser) {
+		ap.logf = f
+	}
+}
+
+func CreateParser(initialState string, evtHandler AnsiEventHandler, opts ...Option) *AnsiParser {
+	ap := &AnsiParser{
+		eventHandler: evtHandler,
+		context:      &ansiContext{},
+	}
+	for _, o := range opts {
+		o(ap)
+	}
+
+	if isDebugEnv := os.Getenv(LogEnv); isDebugEnv == "1" {
+		logFile, _ := os.Create("ansiParser.log")
+		logger := log.New(logFile, "", log.LstdFlags)
+		if ap.logf != nil {
+			l := ap.logf
+			ap.logf = func(s string, v ...interface{}) {
+				l(s, v...)
+				logger.Printf(s, v...)
+			}
+		} else {
+			ap.logf = logger.Printf
+		}
+	}
+
+	if ap.logf == nil {
+		ap.logf = func(string, ...interface{}) {}
+	}
+
+	ap.csiEntry = csiEntryState{baseState{name: "CsiEntry", parser: ap}}
+	ap.csiParam = csiParamState{baseState{name: "CsiParam", parser: ap}}
+	ap.dcsEntry = dcsEntryState{baseState{name: "DcsEntry", parser: ap}}
+	ap.escape = escapeState{baseState{name: "Escape", parser: ap}}
+	ap.escapeIntermediate = escapeIntermediateState{baseState{name: "EscapeIntermediate", parser: ap}}
+	ap.error = errorState{baseState{name: "Error", parser: ap}}
+	ap.ground = groundState{baseState{name: "Ground", parser: ap}}
+	ap.oscString = oscStringState{baseState{name: "OscString", parser: ap}}
+
+	ap.stateMap = []state{
+		ap.csiEntry,
+		ap.csiParam,
+		ap.dcsEntry,
+		ap.escape,
+		ap.escapeIntermediate,
+		ap.error,
+		ap.ground,
+		ap.oscString,
+	}
+
+	ap.currState = getState(initialState, ap.stateMap)
+
+	ap.logf("CreateParser: parser %p", ap)
+	return ap
+}
+
+func getState(name string, states []state) state {
+	for _, el := range states {
+		if el.Name() == name {
+			return el
+		}
+	}
+
+	return nil
+}
+
+func (ap *AnsiParser) Parse(bytes []byte) (int, error) {
+	for i, b := range bytes {
+		if err := ap.handle(b); err != nil {
+			return i, err
+		}
+	}
+
+	return len(bytes), ap.eventHandler.Flush()
+}
+
+func (ap *AnsiParser) handle(b byte) error {
+	ap.context.currentChar = b
+	newState, err := ap.currState.Handle(b)
+	if err != nil {
+		return err
+	}
+
+	if newState == nil {
+		ap.logf("WARNING: newState is nil")
+		return errors.New("New state of 'nil' is invalid.")
+	}
+
+	if newState != ap.currState {
+		if err := ap.changeState(newState); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (ap *AnsiParser) changeState(newState state) error {
+	ap.logf("ChangeState %s --> %s", ap.currState.Name(), newState.Name())
+
+	// Exit old state
+	if err := ap.currState.Exit(); err != nil {
+		ap.logf("Exit state '%s' failed with : '%v'", ap.currState.Name(), err)
+		return err
+	}
+
+	// Perform transition action
+	if err := ap.currState.Transition(newState); err != nil {
+		ap.logf("Transition from '%s' to '%s' failed with: '%v'", ap.currState.Name(), newState.Name(), err)
+		return err
+	}
+
+	// Enter new state
+	if err := newState.Enter(); err != nil {
+		ap.logf("Enter state '%s' failed with: '%v'", newState.Name(), err)
+		return err
+	}
+
+	ap.currState = newState
+	return nil
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go b/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go
new file mode 100644
index 00000000..de0a1f9c
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go
@@ -0,0 +1,99 @@
+package ansiterm
+
+import (
+	"strconv"
+)
+
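+// parseParams splits a raw CSI parameter buffer on ';' into its individual
+// parameters; for example, []byte("3;4") yields []string{"3", "4"}.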
+func parseParams(bytes []byte) ([]string, error) {
+	paramBuff := make([]byte, 0, 0)
+	params := []string{}
+
+	for _, v := range bytes {
+		if v == ';' {
+			if len(paramBuff) > 0 {
+				// Completed parameter, append it to the list
+				s := string(paramBuff)
+				params = append(params, s)
+				paramBuff = make([]byte, 0, 0)
+			}
+		} else {
+			paramBuff = append(paramBuff, v)
+		}
+	}
+
+	// Last parameter may not be terminated with ';'
+	if len(paramBuff) > 0 {
+		s := string(paramBuff)
+		params = append(params, s)
+	}
+
+	return params, nil
+}
+
+func parseCmd(context ansiContext) (string, error) {
+	return string(context.currentChar), nil
+}
+
+func getInt(params []string, dflt int) int {
+	i := getInts(params, 1, dflt)[0]
+	return i
+}
+
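+// getInts parses the parameters as integers, mapping zero or unparsable values
+// to dflt and padding the result to at least minCount entries; for example,
+// getInts([]string{"0"}, 2, 1) yields [1, 1].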
+func getInts(params []string, minCount int, dflt int) []int {
+	ints := []int{}
+
+	for _, v := range params {
+		i, _ := strconv.Atoi(v)
+		// Zero is mapped to the default value in VT100.
+		if i == 0 {
+			i = dflt
+		}
+		ints = append(ints, i)
+	}
+
+	if len(ints) < minCount {
+		remaining := minCount - len(ints)
+		for i := 0; i < remaining; i++ {
+			ints = append(ints, dflt)
+		}
+	}
+
+	return ints
+}
+
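+// modeDispatch maps a DEC private mode parameter to the matching handler call;
+// for example, the sequence ESC [ ? 2 5 l reaches modeDispatch("?25", false),
+// which calls DECTCEM(false) to hide the text cursor.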
+func (ap *AnsiParser) modeDispatch(param string, set bool) error {
+	switch param {
+	case "?3":
+		return ap.eventHandler.DECCOLM(set)
+	case "?6":
+		return ap.eventHandler.DECOM(set)
+	case "?25":
+		return ap.eventHandler.DECTCEM(set)
+	}
+	return nil
+}
+
+func (ap *AnsiParser) hDispatch(params []string) error {
+	if len(params) == 1 {
+		return ap.modeDispatch(params[0], true)
+	}
+
+	return nil
+}
+
+func (ap *AnsiParser) lDispatch(params []string) error {
+	if len(params) == 1 {
+		return ap.modeDispatch(params[0], false)
+	}
+
+	return nil
+}
+
+func getEraseParam(params []string) int {
+	param := getInt(params, 0)
+	if param < 0 || 3 < param {
+		param = 0
+	}
+
+	return param
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/parser_actions.go b/vendor/github.com/Azure/go-ansiterm/parser_actions.go
new file mode 100644
index 00000000..0bb5e51e
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/parser_actions.go
@@ -0,0 +1,119 @@
+package ansiterm
+
+func (ap *AnsiParser) collectParam() error {
+	currChar := ap.context.currentChar
+	ap.logf("collectParam %#x", currChar)
+	ap.context.paramBuffer = append(ap.context.paramBuffer, currChar)
+	return nil
+}
+
+func (ap *AnsiParser) collectInter() error {
+	currChar := ap.context.currentChar
+	ap.logf("collectInter %#x", currChar)
+	ap.context.interBuffer = append(ap.context.interBuffer, currChar)
+	return nil
+}
+
+func (ap *AnsiParser) escDispatch() error {
+	cmd, _ := parseCmd(*ap.context)
+	intermeds := ap.context.interBuffer
+	ap.logf("escDispatch currentChar: %#x", ap.context.currentChar)
+	ap.logf("escDispatch: %v(%v)", cmd, intermeds)
+
+	switch cmd {
+	case "D": // IND
+		return ap.eventHandler.IND()
+	case "E": // NEL, equivalent to CRLF
+		err := ap.eventHandler.Execute(ANSI_CARRIAGE_RETURN)
+		if err == nil {
+			err = ap.eventHandler.Execute(ANSI_LINE_FEED)
+		}
+		return err
+	case "M": // RI
+		return ap.eventHandler.RI()
+	}
+
+	return nil
+}
+
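+// csiDispatch routes a completed CSI sequence to the matching event handler
+// method; for example, ESC [ 2 J arrives here as cmd "J" with params ["2"]
+// and is dispatched as ED(2) (erase the entire display).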
+func (ap *AnsiParser) csiDispatch() error {
+	cmd, _ := parseCmd(*ap.context)
+	params, _ := parseParams(ap.context.paramBuffer)
+	ap.logf("Parsed params: %v with length: %d", params, len(params))
+
+	ap.logf("csiDispatch: %v(%v)", cmd, params)
+
+	switch cmd {
+	case "@":
+		return ap.eventHandler.ICH(getInt(params, 1))
+	case "A":
+		return ap.eventHandler.CUU(getInt(params, 1))
+	case "B":
+		return ap.eventHandler.CUD(getInt(params, 1))
+	case "C":
+		return ap.eventHandler.CUF(getInt(params, 1))
+	case "D":
+		return ap.eventHandler.CUB(getInt(params, 1))
+	case "E":
+		return ap.eventHandler.CNL(getInt(params, 1))
+	case "F":
+		return ap.eventHandler.CPL(getInt(params, 1))
+	case "G":
+		return ap.eventHandler.CHA(getInt(params, 1))
+	case "H":
+		ints := getInts(params, 2, 1)
+		x, y := ints[0], ints[1]
+		return ap.eventHandler.CUP(x, y)
+	case "J":
+		param := getEraseParam(params)
+		return ap.eventHandler.ED(param)
+	case "K":
+		param := getEraseParam(params)
+		return ap.eventHandler.EL(param)
+	case "L":
+		return ap.eventHandler.IL(getInt(params, 1))
+	case "M":
+		return ap.eventHandler.DL(getInt(params, 1))
+	case "P":
+		return ap.eventHandler.DCH(getInt(params, 1))
+	case "S":
+		return ap.eventHandler.SU(getInt(params, 1))
+	case "T":
+		return ap.eventHandler.SD(getInt(params, 1))
+	case "c":
+		return ap.eventHandler.DA(params)
+	case "d":
+		return ap.eventHandler.VPA(getInt(params, 1))
+	case "f":
+		ints := getInts(params, 2, 1)
+		x, y := ints[0], ints[1]
+		return ap.eventHandler.HVP(x, y)
+	case "h":
+		return ap.hDispatch(params)
+	case "l":
+		return ap.lDispatch(params)
+	case "m":
+		return ap.eventHandler.SGR(getInts(params, 1, 0))
+	case "r":
+		ints := getInts(params, 2, 1)
+		top, bottom := ints[0], ints[1]
+		return ap.eventHandler.DECSTBM(top, bottom)
+	default:
+		ap.logf("ERROR: Unsupported CSI command: '%s', with full context:  %v", cmd, ap.context)
+		return nil
+	}
+
+}
+
+func (ap *AnsiParser) print() error {
+	return ap.eventHandler.Print(ap.context.currentChar)
+}
+
+func (ap *AnsiParser) clear() error {
+	ap.context = &ansiContext{}
+	return nil
+}
+
+func (ap *AnsiParser) execute() error {
+	return ap.eventHandler.Execute(ap.context.currentChar)
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/states.go b/vendor/github.com/Azure/go-ansiterm/states.go
new file mode 100644
index 00000000..f2ea1fcd
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/states.go
@@ -0,0 +1,71 @@
+package ansiterm
+
+type stateID int
+
+type state interface {
+	Enter() error
+	Exit() error
+	Handle(byte) (state, error)
+	Name() string
+	Transition(state) error
+}
+
+type baseState struct {
+	name   string
+	parser *AnsiParser
+}
+
+func (base baseState) Enter() error {
+	return nil
+}
+
+func (base baseState) Exit() error {
+	return nil
+}
+
+func (base baseState) Handle(b byte) (s state, e error) {
+
+	switch {
+	case b == CSI_ENTRY:
+		return base.parser.csiEntry, nil
+	case b == DCS_ENTRY:
+		return base.parser.dcsEntry, nil
+	case b == ANSI_ESCAPE_PRIMARY:
+		return base.parser.escape, nil
+	case b == OSC_STRING:
+		return base.parser.oscString, nil
+	case sliceContains(toGroundBytes, b):
+		return base.parser.ground, nil
+	}
+
+	return nil, nil
+}
+
+func (base baseState) Name() string {
+	return base.name
+}
+
+func (base baseState) Transition(s state) error {
+	if s == base.parser.ground {
+		execBytes := []byte{0x18}
+		execBytes = append(execBytes, 0x1A)
+		execBytes = append(execBytes, getByteRange(0x80, 0x8F)...)
+		execBytes = append(execBytes, getByteRange(0x91, 0x97)...)
+		execBytes = append(execBytes, 0x99)
+		execBytes = append(execBytes, 0x9A)
+
+		if sliceContains(execBytes, base.parser.context.currentChar) {
+			return base.parser.execute()
+		}
+	}
+
+	return nil
+}
+
+type dcsEntryState struct {
+	baseState
+}
+
+type errorState struct {
+	baseState
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/utilities.go b/vendor/github.com/Azure/go-ansiterm/utilities.go
new file mode 100644
index 00000000..39211449
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/utilities.go
@@ -0,0 +1,21 @@
+package ansiterm
+
+import (
+	"strconv"
+)
+
+func sliceContains(bytes []byte, b byte) bool {
+	for _, v := range bytes {
+		if v == b {
+			return true
+		}
+	}
+
+	return false
+}
+
+func convertBytesToInteger(bytes []byte) int {
+	s := string(bytes)
+	i, _ := strconv.Atoi(s)
+	return i
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go b/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go
new file mode 100644
index 00000000..a6732797
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go
@@ -0,0 +1,182 @@
+// +build windows
+
+package winterm
+
+import (
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+	"syscall"
+
+	"github.com/Azure/go-ansiterm"
+)
+
+// Windows keyboard constants
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/dd375731(v=vs.85).aspx.
+const (
+	VK_PRIOR    = 0x21 // PAGE UP key
+	VK_NEXT     = 0x22 // PAGE DOWN key
+	VK_END      = 0x23 // END key
+	VK_HOME     = 0x24 // HOME key
+	VK_LEFT     = 0x25 // LEFT ARROW key
+	VK_UP       = 0x26 // UP ARROW key
+	VK_RIGHT    = 0x27 // RIGHT ARROW key
+	VK_DOWN     = 0x28 // DOWN ARROW key
+	VK_SELECT   = 0x29 // SELECT key
+	VK_PRINT    = 0x2A // PRINT key
+	VK_EXECUTE  = 0x2B // EXECUTE key
+	VK_SNAPSHOT = 0x2C // PRINT SCREEN key
+	VK_INSERT   = 0x2D // INS key
+	VK_DELETE   = 0x2E // DEL key
+	VK_HELP     = 0x2F // HELP key
+	VK_F1       = 0x70 // F1 key
+	VK_F2       = 0x71 // F2 key
+	VK_F3       = 0x72 // F3 key
+	VK_F4       = 0x73 // F4 key
+	VK_F5       = 0x74 // F5 key
+	VK_F6       = 0x75 // F6 key
+	VK_F7       = 0x76 // F7 key
+	VK_F8       = 0x77 // F8 key
+	VK_F9       = 0x78 // F9 key
+	VK_F10      = 0x79 // F10 key
+	VK_F11      = 0x7A // F11 key
+	VK_F12      = 0x7B // F12 key
+
+	RIGHT_ALT_PRESSED  = 0x0001
+	LEFT_ALT_PRESSED   = 0x0002
+	RIGHT_CTRL_PRESSED = 0x0004
+	LEFT_CTRL_PRESSED  = 0x0008
+	SHIFT_PRESSED      = 0x0010
+	NUMLOCK_ON         = 0x0020
+	SCROLLLOCK_ON      = 0x0040
+	CAPSLOCK_ON        = 0x0080
+	ENHANCED_KEY       = 0x0100
+)
+
+type ansiCommand struct {
+	CommandBytes []byte
+	Command      string
+	Parameters   []string
+	IsSpecial    bool
+}
+
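+// newAnsiCommand splits a raw escape sequence into its command character and
+// parameters; for example, []byte("\x1b[1;2H") yields Command "H" and
+// Parameters ["1", "2"].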
+func newAnsiCommand(command []byte) *ansiCommand {
+
+	if isCharacterSelectionCmdChar(command[1]) {
+		// Is Character Set Selection commands
+		return &ansiCommand{
+			CommandBytes: command,
+			Command:      string(command),
+			IsSpecial:    true,
+		}
+	}
+
+	// last char is command character
+	lastCharIndex := len(command) - 1
+
+	ac := &ansiCommand{
+		CommandBytes: command,
+		Command:      string(command[lastCharIndex]),
+		IsSpecial:    false,
+	}
+
+	// more than a single escape
+	if lastCharIndex != 0 {
+		start := 1
+		// skip if double char escape sequence
+		if command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_ESCAPE_SECONDARY {
+			start++
+		}
+		// convert this to GetNextParam method
+		ac.Parameters = strings.Split(string(command[start:lastCharIndex]), ansiterm.ANSI_PARAMETER_SEP)
+	}
+
+	return ac
+}
+
+func (ac *ansiCommand) paramAsSHORT(index int, defaultValue int16) int16 {
+	if index < 0 || index >= len(ac.Parameters) {
+		return defaultValue
+	}
+
+	param, err := strconv.ParseInt(ac.Parameters[index], 10, 16)
+	if err != nil {
+		return defaultValue
+	}
+
+	return int16(param)
+}
+
+func (ac *ansiCommand) String() string {
+	return fmt.Sprintf("0x%v \"%v\" (\"%v\")",
+		bytesToHex(ac.CommandBytes),
+		ac.Command,
+		strings.Join(ac.Parameters, "\",\""))
+}
+
+// isAnsiCommandChar returns true if the passed byte falls within the range of ANSI commands.
+// See http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html.
+func isAnsiCommandChar(b byte) bool {
+	switch {
+	case ansiterm.ANSI_COMMAND_FIRST <= b && b <= ansiterm.ANSI_COMMAND_LAST && b != ansiterm.ANSI_ESCAPE_SECONDARY:
+		return true
+	case b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_OSC || b == ansiterm.ANSI_CMD_DECPAM || b == ansiterm.ANSI_CMD_DECPNM:
+		// non-CSI escape sequence terminator
+		return true
+	case b == ansiterm.ANSI_CMD_STR_TERM || b == ansiterm.ANSI_BEL:
+		// String escape sequence terminator
+		return true
+	}
+	return false
+}
+
+func isXtermOscSequence(command []byte, current byte) bool {
+	return (len(command) >= 2 && command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_CMD_OSC && current != ansiterm.ANSI_BEL)
+}
+
+func isCharacterSelectionCmdChar(b byte) bool {
+	return (b == ansiterm.ANSI_CMD_G0 || b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_G2 || b == ansiterm.ANSI_CMD_G3)
+}
+
+// bytesToHex converts a slice of bytes to a human-readable string.
+func bytesToHex(b []byte) string {
+	hex := make([]string, len(b))
+	for i, ch := range b {
+		hex[i] = fmt.Sprintf("%X", ch)
+	}
+	return strings.Join(hex, "")
+}
+
+// ensureInRange adjusts the passed value, if necessary, to ensure it is within
+// the passed min / max range.
+func ensureInRange(n int16, min int16, max int16) int16 {
+	if n < min {
+		return min
+	} else if n > max {
+		return max
+	} else {
+		return n
+	}
+}
+
+func GetStdFile(nFile int) (*os.File, uintptr) {
+	var file *os.File
+	switch nFile {
+	case syscall.STD_INPUT_HANDLE:
+		file = os.Stdin
+	case syscall.STD_OUTPUT_HANDLE:
+		file = os.Stdout
+	case syscall.STD_ERROR_HANDLE:
+		file = os.Stderr
+	default:
+		panic(fmt.Errorf("Invalid standard handle identifier: %v", nFile))
+	}
+
+	fd, err := syscall.GetStdHandle(nFile)
+	if err != nil {
+		panic(fmt.Errorf("Invalid standard handle identifier: %v -- %v", nFile, err))
+	}
+
+	return file, uintptr(fd)
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/api.go b/vendor/github.com/Azure/go-ansiterm/winterm/api.go
new file mode 100644
index 00000000..6055e33b
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/api.go
@@ -0,0 +1,327 @@
+// +build windows
+
+package winterm
+
+import (
+	"fmt"
+	"syscall"
+	"unsafe"
+)
+
+//===========================================================================================================
+// IMPORTANT NOTE:
+//
+//	The methods below make extensive use of the "unsafe" package to obtain the required pointers.
+//	Beginning in Go 1.3, the garbage collector may release local variables (e.g., incoming arguments, stack
+//	variables) the pointers reference *before* the API completes.
+//
+//  As a result, in those cases, the code must hint that the variables remain in use by invoking the
+//	dummy method "use" (see below). Newer versions of Go are planned to change the mechanism to no longer
+//	require unsafe pointers.
+//
+//	If you add or modify methods, ENSURE protection of local variables through the "use" builtin to inform
+//	the garbage collector the variables remain in use if:
+//
+//	-- The value is not a pointer (e.g., int32, struct)
+//	-- The value is not referenced by the method after passing the pointer to Windows
+//
+//	See http://golang.org/doc/go1.3.
+//===========================================================================================================
+
+var (
+	kernel32DLL = syscall.NewLazyDLL("kernel32.dll")
+
+	getConsoleCursorInfoProc       = kernel32DLL.NewProc("GetConsoleCursorInfo")
+	setConsoleCursorInfoProc       = kernel32DLL.NewProc("SetConsoleCursorInfo")
+	setConsoleCursorPositionProc   = kernel32DLL.NewProc("SetConsoleCursorPosition")
+	setConsoleModeProc             = kernel32DLL.NewProc("SetConsoleMode")
+	getConsoleScreenBufferInfoProc = kernel32DLL.NewProc("GetConsoleScreenBufferInfo")
+	setConsoleScreenBufferSizeProc = kernel32DLL.NewProc("SetConsoleScreenBufferSize")
+	scrollConsoleScreenBufferProc  = kernel32DLL.NewProc("ScrollConsoleScreenBufferA")
+	setConsoleTextAttributeProc    = kernel32DLL.NewProc("SetConsoleTextAttribute")
+	setConsoleWindowInfoProc       = kernel32DLL.NewProc("SetConsoleWindowInfo")
+	writeConsoleOutputProc         = kernel32DLL.NewProc("WriteConsoleOutputW")
+	readConsoleInputProc           = kernel32DLL.NewProc("ReadConsoleInputW")
+	waitForSingleObjectProc        = kernel32DLL.NewProc("WaitForSingleObject")
+)
+
+// Windows Console constants
+const (
+	// Console modes
+	// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx.
+	ENABLE_PROCESSED_INPUT        = 0x0001
+	ENABLE_LINE_INPUT             = 0x0002
+	ENABLE_ECHO_INPUT             = 0x0004
+	ENABLE_WINDOW_INPUT           = 0x0008
+	ENABLE_MOUSE_INPUT            = 0x0010
+	ENABLE_INSERT_MODE            = 0x0020
+	ENABLE_QUICK_EDIT_MODE        = 0x0040
+	ENABLE_EXTENDED_FLAGS         = 0x0080
+	ENABLE_AUTO_POSITION          = 0x0100
+	ENABLE_VIRTUAL_TERMINAL_INPUT = 0x0200
+
+	ENABLE_PROCESSED_OUTPUT            = 0x0001
+	ENABLE_WRAP_AT_EOL_OUTPUT          = 0x0002
+	ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004
+	DISABLE_NEWLINE_AUTO_RETURN        = 0x0008
+	ENABLE_LVB_GRID_WORLDWIDE          = 0x0010
+
+	// Character attributes
+	// Note:
+	// -- The attributes are combined to produce various colors (e.g., Blue + Green will create Cyan).
+	//    Clearing all foreground or background colors results in black; setting all creates white.
+	// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682088(v=vs.85).aspx#_win32_character_attributes.
+	FOREGROUND_BLUE      uint16 = 0x0001
+	FOREGROUND_GREEN     uint16 = 0x0002
+	FOREGROUND_RED       uint16 = 0x0004
+	FOREGROUND_INTENSITY uint16 = 0x0008
+	FOREGROUND_MASK      uint16 = 0x000F
+
+	BACKGROUND_BLUE      uint16 = 0x0010
+	BACKGROUND_GREEN     uint16 = 0x0020
+	BACKGROUND_RED       uint16 = 0x0040
+	BACKGROUND_INTENSITY uint16 = 0x0080
+	BACKGROUND_MASK      uint16 = 0x00F0
+
+	COMMON_LVB_MASK          uint16 = 0xFF00
+	COMMON_LVB_REVERSE_VIDEO uint16 = 0x4000
+	COMMON_LVB_UNDERSCORE    uint16 = 0x8000
+
+	// Input event types
+	// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx.
+	KEY_EVENT                = 0x0001
+	MOUSE_EVENT              = 0x0002
+	WINDOW_BUFFER_SIZE_EVENT = 0x0004
+	MENU_EVENT               = 0x0008
+	FOCUS_EVENT              = 0x0010
+
+	// WaitForSingleObject return codes
+	WAIT_ABANDONED = 0x00000080
+	WAIT_FAILED    = 0xFFFFFFFF
+	WAIT_SIGNALED  = 0x00000000
+	WAIT_TIMEOUT   = 0x00000102
+
+	// WaitForSingleObject wait duration
+	WAIT_INFINITE       = 0xFFFFFFFF
+	WAIT_ONE_SECOND     = 1000
+	WAIT_HALF_SECOND    = 500
+	WAIT_QUARTER_SECOND = 250
+)
+
+// Windows API Console types
+// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682101(v=vs.85).aspx for Console specific types (e.g., COORD)
+// -- See https://msdn.microsoft.com/en-us/library/aa296569(v=vs.60).aspx for comments on alignment
+type (
+	CHAR_INFO struct {
+		UnicodeChar uint16
+		Attributes  uint16
+	}
+
+	CONSOLE_CURSOR_INFO struct {
+		Size    uint32
+		Visible int32
+	}
+
+	CONSOLE_SCREEN_BUFFER_INFO struct {
+		Size              COORD
+		CursorPosition    COORD
+		Attributes        uint16
+		Window            SMALL_RECT
+		MaximumWindowSize COORD
+	}
+
+	COORD struct {
+		X int16
+		Y int16
+	}
+
+	SMALL_RECT struct {
+		Left   int16
+		Top    int16
+		Right  int16
+		Bottom int16
+	}
+
+	// INPUT_RECORD is a C/C++ union of which KEY_EVENT_RECORD is one case, it is also the largest
+	// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx.
+	INPUT_RECORD struct {
+		EventType uint16
+		KeyEvent  KEY_EVENT_RECORD
+	}
+
+	KEY_EVENT_RECORD struct {
+		KeyDown         int32
+		RepeatCount     uint16
+		VirtualKeyCode  uint16
+		VirtualScanCode uint16
+		UnicodeChar     uint16
+		ControlKeyState uint32
+	}
+
+	WINDOW_BUFFER_SIZE struct {
+		Size COORD
+	}
+)
+
+// boolToBOOL converts a Go bool into a Windows int32.
+func boolToBOOL(f bool) int32 {
+	if f {
+		return int32(1)
+	} else {
+		return int32(0)
+	}
+}
+
+// GetConsoleCursorInfo retrieves information about the size and visibility of the console cursor.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683163(v=vs.85).aspx.
+func GetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error {
+	r1, r2, err := getConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0)
+	return checkError(r1, r2, err)
+}
+
+// SetConsoleCursorInfo sets the size and visibility of the console cursor.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686019(v=vs.85).aspx.
+func SetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error {
+	r1, r2, err := setConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0)
+	return checkError(r1, r2, err)
+}
+
+// SetConsoleCursorPosition sets the location of the console cursor.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx.
+func SetConsoleCursorPosition(handle uintptr, coord COORD) error {
+	r1, r2, err := setConsoleCursorPositionProc.Call(handle, coordToPointer(coord))
+	use(coord)
+	return checkError(r1, r2, err)
+}
+
+// GetConsoleMode gets the console mode for the given file descriptor.
+// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx.
+func GetConsoleMode(handle uintptr) (mode uint32, err error) {
+	err = syscall.GetConsoleMode(syscall.Handle(handle), &mode)
+	return mode, err
+}
+
+// SetConsoleMode sets the console mode for the given file descriptor.
+// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx.
+func SetConsoleMode(handle uintptr, mode uint32) error {
+	r1, r2, err := setConsoleModeProc.Call(handle, uintptr(mode), 0)
+	use(mode)
+	return checkError(r1, r2, err)
+}
+
+// GetConsoleScreenBufferInfo retrieves information about the specified console screen buffer.
+// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx.
+func GetConsoleScreenBufferInfo(handle uintptr) (*CONSOLE_SCREEN_BUFFER_INFO, error) {
+	info := CONSOLE_SCREEN_BUFFER_INFO{}
+	err := checkError(getConsoleScreenBufferInfoProc.Call(handle, uintptr(unsafe.Pointer(&info)), 0))
+	if err != nil {
+		return nil, err
+	}
+	return &info, nil
+}
+
+func ScrollConsoleScreenBuffer(handle uintptr, scrollRect SMALL_RECT, clipRect SMALL_RECT, destOrigin COORD, char CHAR_INFO) error {
+	r1, r2, err := scrollConsoleScreenBufferProc.Call(handle, uintptr(unsafe.Pointer(&scrollRect)), uintptr(unsafe.Pointer(&clipRect)), coordToPointer(destOrigin), uintptr(unsafe.Pointer(&char)))
+	use(scrollRect)
+	use(clipRect)
+	use(destOrigin)
+	use(char)
+	return checkError(r1, r2, err)
+}
+
+// SetConsoleScreenBufferSize sets the size of the console screen buffer.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686044(v=vs.85).aspx.
+func SetConsoleScreenBufferSize(handle uintptr, coord COORD) error {
+	r1, r2, err := setConsoleScreenBufferSizeProc.Call(handle, coordToPointer(coord))
+	use(coord)
+	return checkError(r1, r2, err)
+}
+
+// SetConsoleTextAttribute sets the attributes of characters written to the
+// console screen buffer by the WriteFile or WriteConsole function.
+// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686047(v=vs.85).aspx.
+func SetConsoleTextAttribute(handle uintptr, attribute uint16) error {
+	r1, r2, err := setConsoleTextAttributeProc.Call(handle, uintptr(attribute), 0)
+	use(attribute)
+	return checkError(r1, r2, err)
+}
+
+// SetConsoleWindowInfo sets the size and position of the console screen buffer's window.
+// Note that the size and location must be within and no larger than the backing console screen buffer.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686125(v=vs.85).aspx.
+func SetConsoleWindowInfo(handle uintptr, isAbsolute bool, rect SMALL_RECT) error {
+	r1, r2, err := setConsoleWindowInfoProc.Call(handle, uintptr(boolToBOOL(isAbsolute)), uintptr(unsafe.Pointer(&rect)))
+	use(isAbsolute)
+	use(rect)
+	return checkError(r1, r2, err)
+}
+
+// WriteConsoleOutput writes the CHAR_INFOs from the provided buffer to the active console buffer.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687404(v=vs.85).aspx.
+func WriteConsoleOutput(handle uintptr, buffer []CHAR_INFO, bufferSize COORD, bufferCoord COORD, writeRegion *SMALL_RECT) error {
+	r1, r2, err := writeConsoleOutputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), coordToPointer(bufferSize), coordToPointer(bufferCoord), uintptr(unsafe.Pointer(writeRegion)))
+	use(buffer)
+	use(bufferSize)
+	use(bufferCoord)
+	return checkError(r1, r2, err)
+}
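+
+// Illustrative note (not part of the original source): to draw a single cell at
+// a given region, callers elsewhere in this package build a 1x1 buffer, e.g.
+//     cells := []CHAR_INFO{{UnicodeChar: 'X', Attributes: attrs}}
+//     err := WriteConsoleOutput(fd, cells, COORD{1, 1}, COORD{0, 0}, &region)
+// where attrs, fd and region are assumed to come from the surrounding code.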
+
+// ReadConsoleInput reads (and removes) data from the console input buffer.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms684961(v=vs.85).aspx.
+func ReadConsoleInput(handle uintptr, buffer []INPUT_RECORD, count *uint32) error {
+	r1, r2, err := readConsoleInputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), uintptr(len(buffer)), uintptr(unsafe.Pointer(count)))
+	use(buffer)
+	return checkError(r1, r2, err)
+}
+
+// WaitForSingleObject waits for the passed handle to be signaled.
+// It returns true if the handle was signaled; false otherwise.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687032(v=vs.85).aspx.
+func WaitForSingleObject(handle uintptr, msWait uint32) (bool, error) {
+	r1, _, err := waitForSingleObjectProc.Call(handle, uintptr(uint32(msWait)))
+	switch r1 {
+	case WAIT_ABANDONED, WAIT_TIMEOUT:
+		return false, nil
+	case WAIT_SIGNALED:
+		return true, nil
+	}
+	use(msWait)
+	return false, err
+}
+
+// String helpers
+func (info CONSOLE_SCREEN_BUFFER_INFO) String() string {
+	return fmt.Sprintf("Size(%v) Cursor(%v) Window(%v) Max(%v)", info.Size, info.CursorPosition, info.Window, info.MaximumWindowSize)
+}
+
+func (coord COORD) String() string {
+	return fmt.Sprintf("%v,%v", coord.X, coord.Y)
+}
+
+func (rect SMALL_RECT) String() string {
+	return fmt.Sprintf("(%v,%v),(%v,%v)", rect.Left, rect.Top, rect.Right, rect.Bottom)
+}
+
+// checkError evaluates the results of a Windows API call and returns the error if it failed.
+func checkError(r1, r2 uintptr, err error) error {
+	// Windows APIs return non-zero to indicate success
+	if r1 != 0 {
+		return nil
+	}
+
+	// Return the error if provided, otherwise default to EINVAL
+	if err != nil {
+		return err
+	}
+	return syscall.EINVAL
+}
+
+// coordToPointer converts a COORD into a uintptr (by fooling the type system).
+func coordToPointer(c COORD) uintptr {
+	// Note: This code assumes the two SHORTs are correctly laid out; the "cast" to uint32 is just to get a pointer to pass.
+	return uintptr(*((*uint32)(unsafe.Pointer(&c))))
+}
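+
+// Illustrative note (not in the original source): on the little-endian layout
+// Windows uses, the conversion above is equivalent to packing the two int16
+// fields explicitly:
+//     uintptr(uint32(uint16(c.X)) | uint32(uint16(c.Y))<<16)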
+
+// use is a no-op, but the compiler cannot see that it is.
+// Calling use(p) ensures that p is kept live until that point.
+func use(p interface{}) {}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go b/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go
new file mode 100644
index 00000000..cbec8f72
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go
@@ -0,0 +1,100 @@
+// +build windows
+
+package winterm
+
+import "github.com/Azure/go-ansiterm"
+
+const (
+	FOREGROUND_COLOR_MASK = FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE
+	BACKGROUND_COLOR_MASK = BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE
+)
+
+// collectAnsiIntoWindowsAttributes modifies the passed Windows text mode flags to reflect the
+// request represented by the passed ANSI mode.
+func collectAnsiIntoWindowsAttributes(windowsMode uint16, inverted bool, baseMode uint16, ansiMode int16) (uint16, bool) {
+	switch ansiMode {
+
+	// Mode styles
+	case ansiterm.ANSI_SGR_BOLD:
+		windowsMode = windowsMode | FOREGROUND_INTENSITY
+
+	case ansiterm.ANSI_SGR_DIM, ansiterm.ANSI_SGR_BOLD_DIM_OFF:
+		windowsMode &^= FOREGROUND_INTENSITY
+
+	case ansiterm.ANSI_SGR_UNDERLINE:
+		windowsMode = windowsMode | COMMON_LVB_UNDERSCORE
+
+	case ansiterm.ANSI_SGR_REVERSE:
+		inverted = true
+
+	case ansiterm.ANSI_SGR_REVERSE_OFF:
+		inverted = false
+
+	case ansiterm.ANSI_SGR_UNDERLINE_OFF:
+		windowsMode &^= COMMON_LVB_UNDERSCORE
+
+		// Foreground colors
+	case ansiterm.ANSI_SGR_FOREGROUND_DEFAULT:
+		windowsMode = (windowsMode &^ FOREGROUND_MASK) | (baseMode & FOREGROUND_MASK)
+
+	case ansiterm.ANSI_SGR_FOREGROUND_BLACK:
+		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK)
+
+	case ansiterm.ANSI_SGR_FOREGROUND_RED:
+		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED
+
+	case ansiterm.ANSI_SGR_FOREGROUND_GREEN:
+		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN
+
+	case ansiterm.ANSI_SGR_FOREGROUND_YELLOW:
+		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN
+
+	case ansiterm.ANSI_SGR_FOREGROUND_BLUE:
+		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_BLUE
+
+	case ansiterm.ANSI_SGR_FOREGROUND_MAGENTA:
+		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_BLUE
+
+	case ansiterm.ANSI_SGR_FOREGROUND_CYAN:
+		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN | FOREGROUND_BLUE
+
+	case ansiterm.ANSI_SGR_FOREGROUND_WHITE:
+		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE
+
+		// Background colors
+	case ansiterm.ANSI_SGR_BACKGROUND_DEFAULT:
+		// Restore the default (base) background attributes
+		windowsMode = (windowsMode &^ BACKGROUND_MASK) | (baseMode & BACKGROUND_MASK)
+
+	case ansiterm.ANSI_SGR_BACKGROUND_BLACK:
+		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK)
+
+	case ansiterm.ANSI_SGR_BACKGROUND_RED:
+		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED
+
+	case ansiterm.ANSI_SGR_BACKGROUND_GREEN:
+		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN
+
+	case ansiterm.ANSI_SGR_BACKGROUND_YELLOW:
+		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN
+
+	case ansiterm.ANSI_SGR_BACKGROUND_BLUE:
+		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_BLUE
+
+	case ansiterm.ANSI_SGR_BACKGROUND_MAGENTA:
+		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_BLUE
+
+	case ansiterm.ANSI_SGR_BACKGROUND_CYAN:
+		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN | BACKGROUND_BLUE
+
+	case ansiterm.ANSI_SGR_BACKGROUND_WHITE:
+		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE
+	}
+
+	return windowsMode, inverted
+}
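+
+// Illustrative usage (not part of the original source): an SGR sequence such
+// as "1;31" (bold, red foreground) would be folded one parameter at a time:
+//     attr, inv := collectAnsiIntoWindowsAttributes(base, false, base, ansiterm.ANSI_SGR_BOLD)
+//     attr, inv = collectAnsiIntoWindowsAttributes(attr, inv, base, ansiterm.ANSI_SGR_FOREGROUND_RED)
+// leaving FOREGROUND_INTENSITY and FOREGROUND_RED set in attr.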
+
+// invertAttributes inverts the foreground and background colors of a Windows attributes value
+func invertAttributes(windowsMode uint16) uint16 {
+	return (COMMON_LVB_MASK & windowsMode) | ((FOREGROUND_MASK & windowsMode) << 4) | ((BACKGROUND_MASK & windowsMode) >> 4)
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go b/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go
new file mode 100644
index 00000000..3ee06ea7
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go
@@ -0,0 +1,101 @@
+// +build windows
+
+package winterm
+
+const (
+	horizontal = iota
+	vertical
+)
+
+func (h *windowsAnsiEventHandler) getCursorWindow(info *CONSOLE_SCREEN_BUFFER_INFO) SMALL_RECT {
+	if h.originMode {
+		sr := h.effectiveSr(info.Window)
+		return SMALL_RECT{
+			Top:    sr.top,
+			Bottom: sr.bottom,
+			Left:   0,
+			Right:  info.Size.X - 1,
+		}
+	} else {
+		return SMALL_RECT{
+			Top:    info.Window.Top,
+			Bottom: info.Window.Bottom,
+			Left:   0,
+			Right:  info.Size.X - 1,
+		}
+	}
+}
+
+// setCursorPosition sets the cursor to the specified position, bounded to the screen size
+func (h *windowsAnsiEventHandler) setCursorPosition(position COORD, window SMALL_RECT) error {
+	position.X = ensureInRange(position.X, window.Left, window.Right)
+	position.Y = ensureInRange(position.Y, window.Top, window.Bottom)
+	err := SetConsoleCursorPosition(h.fd, position)
+	if err != nil {
+		return err
+	}
+	h.logf("Cursor position set: (%d, %d)", position.X, position.Y)
+	return err
+}
+
+func (h *windowsAnsiEventHandler) moveCursorVertical(param int) error {
+	return h.moveCursor(vertical, param)
+}
+
+func (h *windowsAnsiEventHandler) moveCursorHorizontal(param int) error {
+	return h.moveCursor(horizontal, param)
+}
+
+func (h *windowsAnsiEventHandler) moveCursor(moveMode int, param int) error {
+	info, err := GetConsoleScreenBufferInfo(h.fd)
+	if err != nil {
+		return err
+	}
+
+	position := info.CursorPosition
+	switch moveMode {
+	case horizontal:
+		position.X += int16(param)
+	case vertical:
+		position.Y += int16(param)
+	}
+
+	if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (h *windowsAnsiEventHandler) moveCursorLine(param int) error {
+	info, err := GetConsoleScreenBufferInfo(h.fd)
+	if err != nil {
+		return err
+	}
+
+	position := info.CursorPosition
+	position.X = 0
+	position.Y += int16(param)
+
+	if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (h *windowsAnsiEventHandler) moveCursorColumn(param int) error {
+	info, err := GetConsoleScreenBufferInfo(h.fd)
+	if err != nil {
+		return err
+	}
+
+	position := info.CursorPosition
+	position.X = int16(param) - 1
+
+	if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go b/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go
new file mode 100644
index 00000000..244b5fa2
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go
@@ -0,0 +1,84 @@
+// +build windows
+
+package winterm
+
+import "github.com/Azure/go-ansiterm"
+
+func (h *windowsAnsiEventHandler) clearRange(attributes uint16, fromCoord COORD, toCoord COORD) error {
+	// Ignore an invalid (negative area) request
+	if toCoord.Y < fromCoord.Y {
+		return nil
+	}
+
+	var err error
+
+	var coordStart = COORD{}
+	var coordEnd = COORD{}
+
+	xCurrent, yCurrent := fromCoord.X, fromCoord.Y
+	xEnd, yEnd := toCoord.X, toCoord.Y
+
+	// Clear any partial initial line
+	if xCurrent > 0 {
+		coordStart.X, coordStart.Y = xCurrent, yCurrent
+		coordEnd.X, coordEnd.Y = xEnd, yCurrent
+
+		err = h.clearRect(attributes, coordStart, coordEnd)
+		if err != nil {
+			return err
+		}
+
+		xCurrent = 0
+		yCurrent += 1
+	}
+
+	// Clear intervening rectangular section
+	if yCurrent < yEnd {
+		coordStart.X, coordStart.Y = xCurrent, yCurrent
+		coordEnd.X, coordEnd.Y = xEnd, yEnd-1
+
+		err = h.clearRect(attributes, coordStart, coordEnd)
+		if err != nil {
+			return err
+		}
+
+		xCurrent = 0
+		yCurrent = yEnd
+	}
+
+	// Clear remaining partial ending line
+	coordStart.X, coordStart.Y = xCurrent, yCurrent
+	coordEnd.X, coordEnd.Y = xEnd, yEnd
+
+	err = h.clearRect(attributes, coordStart, coordEnd)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (h *windowsAnsiEventHandler) clearRect(attributes uint16, fromCoord COORD, toCoord COORD) error {
+	region := SMALL_RECT{Top: fromCoord.Y, Left: fromCoord.X, Bottom: toCoord.Y, Right: toCoord.X}
+	width := toCoord.X - fromCoord.X + 1
+	height := toCoord.Y - fromCoord.Y + 1
+	size := uint32(width) * uint32(height)
+
+	if size <= 0 {
+		return nil
+	}
+
+	buffer := make([]CHAR_INFO, size)
+
+	char := CHAR_INFO{ansiterm.FILL_CHARACTER, attributes}
+	for i := 0; i < int(size); i++ {
+		buffer[i] = char
+	}
+
+	err := WriteConsoleOutput(h.fd, buffer, COORD{X: width, Y: height}, COORD{X: 0, Y: 0}, &region)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go b/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go
new file mode 100644
index 00000000..2d27fa1d
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go
@@ -0,0 +1,118 @@
+// +build windows
+
+package winterm
+
+// effectiveSr gets the current effective scroll region in buffer coordinates
+func (h *windowsAnsiEventHandler) effectiveSr(window SMALL_RECT) scrollRegion {
+	top := addInRange(window.Top, h.sr.top, window.Top, window.Bottom)
+	bottom := addInRange(window.Top, h.sr.bottom, window.Top, window.Bottom)
+	if top >= bottom {
+		top = window.Top
+		bottom = window.Bottom
+	}
+	return scrollRegion{top: top, bottom: bottom}
+}
+
+func (h *windowsAnsiEventHandler) scrollUp(param int) error {
+	info, err := GetConsoleScreenBufferInfo(h.fd)
+	if err != nil {
+		return err
+	}
+
+	sr := h.effectiveSr(info.Window)
+	return h.scroll(param, sr, info)
+}
+
+func (h *windowsAnsiEventHandler) scrollDown(param int) error {
+	return h.scrollUp(-param)
+}
+
+func (h *windowsAnsiEventHandler) deleteLines(param int) error {
+	info, err := GetConsoleScreenBufferInfo(h.fd)
+	if err != nil {
+		return err
+	}
+
+	start := info.CursorPosition.Y
+	sr := h.effectiveSr(info.Window)
+	// Lines cannot be inserted or deleted outside the scrolling region.
+	if start >= sr.top && start <= sr.bottom {
+		sr.top = start
+		return h.scroll(param, sr, info)
+	} else {
+		return nil
+	}
+}
+
+func (h *windowsAnsiEventHandler) insertLines(param int) error {
+	return h.deleteLines(-param)
+}
+
+// scroll scrolls the provided scroll region by param lines. The scroll region is in buffer coordinates.
+func (h *windowsAnsiEventHandler) scroll(param int, sr scrollRegion, info *CONSOLE_SCREEN_BUFFER_INFO) error {
+	h.logf("scroll: scrollTop: %d, scrollBottom: %d", sr.top, sr.bottom)
+	h.logf("scroll: windowTop: %d, windowBottom: %d", info.Window.Top, info.Window.Bottom)
+
+	// Copy from and clip to the scroll region (full buffer width)
+	scrollRect := SMALL_RECT{
+		Top:    sr.top,
+		Bottom: sr.bottom,
+		Left:   0,
+		Right:  info.Size.X - 1,
+	}
+
+	// Origin to which area should be copied
+	destOrigin := COORD{
+		X: 0,
+		Y: sr.top - int16(param),
+	}
+
+	char := CHAR_INFO{
+		UnicodeChar: ' ',
+		Attributes:  h.attributes,
+	}
+
+	if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (h *windowsAnsiEventHandler) deleteCharacters(param int) error {
+	info, err := GetConsoleScreenBufferInfo(h.fd)
+	if err != nil {
+		return err
+	}
+	return h.scrollLine(param, info.CursorPosition, info)
+}
+
+func (h *windowsAnsiEventHandler) insertCharacters(param int) error {
+	return h.deleteCharacters(-param)
+}
+
+// scrollLine scrolls a line horizontally starting at the provided position by a number of columns.
+func (h *windowsAnsiEventHandler) scrollLine(columns int, position COORD, info *CONSOLE_SCREEN_BUFFER_INFO) error {
+	// Copy from and clip to the scroll region (full buffer width)
+	scrollRect := SMALL_RECT{
+		Top:    position.Y,
+		Bottom: position.Y,
+		Left:   position.X,
+		Right:  info.Size.X - 1,
+	}
+
+	// Origin to which area should be copied
+	destOrigin := COORD{
+		X: position.X - int16(columns),
+		Y: position.Y,
+	}
+
+	char := CHAR_INFO{
+		UnicodeChar: ' ',
+		Attributes:  h.attributes,
+	}
+
+	if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go b/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go
new file mode 100644
index 00000000..afa7635d
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go
@@ -0,0 +1,9 @@
+// +build windows
+
+package winterm
+
+// addInRange increments a value by the passed quantity while ensuring the value
+// always remains within the supplied min / max range.
+func addInRange(n int16, increment int16, min int16, max int16) int16 {
+	return ensureInRange(n+increment, min, max)
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go b/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go
new file mode 100644
index 00000000..2d40fb75
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go
@@ -0,0 +1,743 @@
+// +build windows
+
+package winterm
+
+import (
+	"bytes"
+	"log"
+	"os"
+	"strconv"
+
+	"github.com/Azure/go-ansiterm"
+)
+
+type windowsAnsiEventHandler struct {
+	fd             uintptr
+	file           *os.File
+	infoReset      *CONSOLE_SCREEN_BUFFER_INFO
+	sr             scrollRegion
+	buffer         bytes.Buffer
+	attributes     uint16
+	inverted       bool
+	wrapNext       bool
+	drewMarginByte bool
+	originMode     bool
+	marginByte     byte
+	curInfo        *CONSOLE_SCREEN_BUFFER_INFO
+	curPos         COORD
+	logf           func(string, ...interface{})
+}
+
+type Option func(*windowsAnsiEventHandler)
+
+func WithLogf(f func(string, ...interface{})) Option {
+	return func(w *windowsAnsiEventHandler) {
+		w.logf = f
+	}
+}
+
+func CreateWinEventHandler(fd uintptr, file *os.File, opts ...Option) ansiterm.AnsiEventHandler {
+	infoReset, err := GetConsoleScreenBufferInfo(fd)
+	if err != nil {
+		return nil
+	}
+
+	h := &windowsAnsiEventHandler{
+		fd:         fd,
+		file:       file,
+		infoReset:  infoReset,
+		attributes: infoReset.Attributes,
+	}
+	for _, o := range opts {
+		o(h)
+	}
+
+	if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" {
+		logFile, _ := os.Create("winEventHandler.log")
+		logger := log.New(logFile, "", log.LstdFlags)
+		if h.logf != nil {
+			l := h.logf
+			h.logf = func(s string, v ...interface{}) {
+				l(s, v...)
+				logger.Printf(s, v...)
+			}
+		} else {
+			h.logf = logger.Printf
+		}
+	}
+
+	if h.logf == nil {
+		h.logf = func(string, ...interface{}) {}
+	}
+
+	return h
+}
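+
+// Illustrative usage (not part of the original source), assuming stdout is
+// attached to a Windows console:
+//     handler := winterm.CreateWinEventHandler(os.Stdout.Fd(), os.Stdout,
+//         winterm.WithLogf(log.Printf))
+//     parser := ansiterm.CreateParser("Ground", handler)
+//     _, _ = parser.Parse([]byte("\x1b[31mred\x1b[0m"))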
+
+type scrollRegion struct {
+	top    int16
+	bottom int16
+}
+
+// simulateLF simulates a LF or CR+LF by scrolling if necessary to handle the
+// current cursor position and scroll region settings, in which case it returns
+// true. If no special handling is necessary, then it does nothing and returns
+// false.
+//
+// In the false case, the caller should ensure that a carriage return
+// and line feed are inserted or that the text is otherwise wrapped.
+func (h *windowsAnsiEventHandler) simulateLF(includeCR bool) (bool, error) {
+	if h.wrapNext {
+		if err := h.Flush(); err != nil {
+			return false, err
+		}
+		h.clearWrap()
+	}
+	pos, info, err := h.getCurrentInfo()
+	if err != nil {
+		return false, err
+	}
+	sr := h.effectiveSr(info.Window)
+	if pos.Y == sr.bottom {
+		// Scrolling is necessary. Let Windows automatically scroll if the scrolling region
+		// is the full window.
+		if sr.top == info.Window.Top && sr.bottom == info.Window.Bottom {
+			if includeCR {
+				pos.X = 0
+				h.updatePos(pos)
+			}
+			return false, nil
+		}
+
+		// A custom scroll region is active. Scroll the window manually to simulate
+		// the LF.
+		if err := h.Flush(); err != nil {
+			return false, err
+		}
+		h.logf("Simulating LF inside scroll region")
+		if err := h.scrollUp(1); err != nil {
+			return false, err
+		}
+		if includeCR {
+			pos.X = 0
+			if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
+				return false, err
+			}
+		}
+		return true, nil
+
+	} else if pos.Y < info.Window.Bottom {
+		// Let Windows handle the LF.
+		pos.Y++
+		if includeCR {
+			pos.X = 0
+		}
+		h.updatePos(pos)
+		return false, nil
+	} else {
+		// The cursor is at the bottom of the screen but outside the scroll
+		// region. Skip the LF.
+		h.logf("Simulating LF outside scroll region")
+		if includeCR {
+			if err := h.Flush(); err != nil {
+				return false, err
+			}
+			pos.X = 0
+			if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
+				return false, err
+			}
+		}
+		return true, nil
+	}
+}
+
+// executeLF executes a LF without a CR.
+func (h *windowsAnsiEventHandler) executeLF() error {
+	handled, err := h.simulateLF(false)
+	if err != nil {
+		return err
+	}
+	if !handled {
+		// Windows LF will reset the cursor column position. Write the LF
+		// and restore the cursor position.
+		pos, _, err := h.getCurrentInfo()
+		if err != nil {
+			return err
+		}
+		h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED)
+		if pos.X != 0 {
+			if err := h.Flush(); err != nil {
+				return err
+			}
+			h.logf("Resetting cursor position for LF without CR")
+			if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func (h *windowsAnsiEventHandler) Print(b byte) error {
+	if h.wrapNext {
+		h.buffer.WriteByte(h.marginByte)
+		h.clearWrap()
+		if _, err := h.simulateLF(true); err != nil {
+			return err
+		}
+	}
+	pos, info, err := h.getCurrentInfo()
+	if err != nil {
+		return err
+	}
+	if pos.X == info.Size.X-1 {
+		h.wrapNext = true
+		h.marginByte = b
+	} else {
+		pos.X++
+		h.updatePos(pos)
+		h.buffer.WriteByte(b)
+	}
+	return nil
+}
+
+func (h *windowsAnsiEventHandler) Execute(b byte) error {
+	switch b {
+	case ansiterm.ANSI_TAB:
+		h.logf("Execute(TAB)")
+		// Move to the next tab stop, but preserve auto-wrap if already set.
+		if !h.wrapNext {
+			pos, info, err := h.getCurrentInfo()
+			if err != nil {
+				return err
+			}
+			pos.X = (pos.X + 8) - pos.X%8
+			if pos.X >= info.Size.X {
+				pos.X = info.Size.X - 1
+			}
+			if err := h.Flush(); err != nil {
+				return err
+			}
+			if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
+				return err
+			}
+		}
+		return nil
+
+	case ansiterm.ANSI_BEL:
+		h.buffer.WriteByte(ansiterm.ANSI_BEL)
+		return nil
+
+	case ansiterm.ANSI_BACKSPACE:
+		if h.wrapNext {
+			if err := h.Flush(); err != nil {
+				return err
+			}
+			h.clearWrap()
+		}
+		pos, _, err := h.getCurrentInfo()
+		if err != nil {
+			return err
+		}
+		if pos.X > 0 {
+			pos.X--
+			h.updatePos(pos)
+			h.buffer.WriteByte(ansiterm.ANSI_BACKSPACE)
+		}
+		return nil
+
+	case ansiterm.ANSI_VERTICAL_TAB, ansiterm.ANSI_FORM_FEED:
+		// Treat as true LF.
+		return h.executeLF()
+
+	case ansiterm.ANSI_LINE_FEED:
+		// Simulate a CR and LF for now since there is no way in go-ansiterm
+		// to tell if the LF should include CR (and more things break when it's
+		// missing than when it's incorrectly added).
+		handled, err := h.simulateLF(true)
+		if handled || err != nil {
+			return err
+		}
+		return h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED)
+
+	case ansiterm.ANSI_CARRIAGE_RETURN:
+		if h.wrapNext {
+			if err := h.Flush(); err != nil {
+				return err
+			}
+			h.clearWrap()
+		}
+		pos, _, err := h.getCurrentInfo()
+		if err != nil {
+			return err
+		}
+		if pos.X != 0 {
+			pos.X = 0
+			h.updatePos(pos)
+			h.buffer.WriteByte(ansiterm.ANSI_CARRIAGE_RETURN)
+		}
+		return nil
+
+	default:
+		return nil
+	}
+}
+
+func (h *windowsAnsiEventHandler) CUU(param int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("CUU: [%v]", []string{strconv.Itoa(param)})
+	h.clearWrap()
+	return h.moveCursorVertical(-param)
+}
+
+func (h *windowsAnsiEventHandler) CUD(param int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("CUD: [%v]", []string{strconv.Itoa(param)})
+	h.clearWrap()
+	return h.moveCursorVertical(param)
+}
+
+func (h *windowsAnsiEventHandler) CUF(param int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("CUF: [%v]", []string{strconv.Itoa(param)})
+	h.clearWrap()
+	return h.moveCursorHorizontal(param)
+}
+
+func (h *windowsAnsiEventHandler) CUB(param int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("CUB: [%v]", []string{strconv.Itoa(param)})
+	h.clearWrap()
+	return h.moveCursorHorizontal(-param)
+}
+
+func (h *windowsAnsiEventHandler) CNL(param int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("CNL: [%v]", []string{strconv.Itoa(param)})
+	h.clearWrap()
+	return h.moveCursorLine(param)
+}
+
+func (h *windowsAnsiEventHandler) CPL(param int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("CPL: [%v]", []string{strconv.Itoa(param)})
+	h.clearWrap()
+	return h.moveCursorLine(-param)
+}
+
+func (h *windowsAnsiEventHandler) CHA(param int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("CHA: [%v]", []string{strconv.Itoa(param)})
+	h.clearWrap()
+	return h.moveCursorColumn(param)
+}
+
+func (h *windowsAnsiEventHandler) VPA(param int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("VPA: [[%d]]", param)
+	h.clearWrap()
+	info, err := GetConsoleScreenBufferInfo(h.fd)
+	if err != nil {
+		return err
+	}
+	window := h.getCursorWindow(info)
+	position := info.CursorPosition
+	position.Y = window.Top + int16(param) - 1
+	return h.setCursorPosition(position, window)
+}
+
+func (h *windowsAnsiEventHandler) CUP(row int, col int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("CUP: [[%d %d]]", row, col)
+	h.clearWrap()
+	info, err := GetConsoleScreenBufferInfo(h.fd)
+	if err != nil {
+		return err
+	}
+
+	window := h.getCursorWindow(info)
+	position := COORD{window.Left + int16(col) - 1, window.Top + int16(row) - 1}
+	return h.setCursorPosition(position, window)
+}
+
+func (h *windowsAnsiEventHandler) HVP(row int, col int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("HVP: [[%d %d]]", row, col)
+	h.clearWrap()
+	return h.CUP(row, col)
+}
+
+func (h *windowsAnsiEventHandler) DECTCEM(visible bool) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("DECTCEM: [%v]", []string{strconv.FormatBool(visible)})
+	h.clearWrap()
+	return nil
+}
+
+func (h *windowsAnsiEventHandler) DECOM(enable bool) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("DECOM: [%v]", []string{strconv.FormatBool(enable)})
+	h.clearWrap()
+	h.originMode = enable
+	return h.CUP(1, 1)
+}
+
+func (h *windowsAnsiEventHandler) DECCOLM(use132 bool) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("DECCOLM: [%v]", []string{strconv.FormatBool(use132)})
+	h.clearWrap()
+	if err := h.ED(2); err != nil {
+		return err
+	}
+	info, err := GetConsoleScreenBufferInfo(h.fd)
+	if err != nil {
+		return err
+	}
+	targetWidth := int16(80)
+	if use132 {
+		targetWidth = 132
+	}
+	if info.Size.X < targetWidth {
+		if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil {
+			h.logf("set buffer failed: %v", err)
+			return err
+		}
+	}
+	window := info.Window
+	window.Left = 0
+	window.Right = targetWidth - 1
+	if err := SetConsoleWindowInfo(h.fd, true, window); err != nil {
+		h.logf("set window failed: %v", err)
+		return err
+	}
+	if info.Size.X > targetWidth {
+		if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil {
+			h.logf("set buffer failed: %v", err)
+			return err
+		}
+	}
+	return SetConsoleCursorPosition(h.fd, COORD{0, 0})
+}
+
+func (h *windowsAnsiEventHandler) ED(param int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("ED: [%v]", []string{strconv.Itoa(param)})
+	h.clearWrap()
+
+	// [J  -- Erases from the cursor to the end of the screen, including the cursor position.
+	// [1J -- Erases from the beginning of the screen to the cursor, including the cursor position.
+	// [2J -- Erases the complete display. The cursor does not move.
+	// Notes:
+	// -- Clearing the entire buffer, versus just the Window, works best for Windows Consoles
+
+	info, err := GetConsoleScreenBufferInfo(h.fd)
+	if err != nil {
+		return err
+	}
+
+	var start COORD
+	var end COORD
+
+	switch param {
+	case 0:
+		start = info.CursorPosition
+		end = COORD{info.Size.X - 1, info.Size.Y - 1}
+
+	case 1:
+		start = COORD{0, 0}
+		end = info.CursorPosition
+
+	case 2:
+		start = COORD{0, 0}
+		end = COORD{info.Size.X - 1, info.Size.Y - 1}
+	}
+
+	err = h.clearRange(h.attributes, start, end)
+	if err != nil {
+		return err
+	}
+
+	// If the whole buffer was cleared, move the window to the top while preserving
+	// the window-relative cursor position.
+	if param == 2 {
+		pos := info.CursorPosition
+		window := info.Window
+		pos.Y -= window.Top
+		window.Bottom -= window.Top
+		window.Top = 0
+		if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
+			return err
+		}
+		if err := SetConsoleWindowInfo(h.fd, true, window); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (h *windowsAnsiEventHandler) EL(param int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("EL: [%v]", strconv.Itoa(param))
+	h.clearWrap()
+
+	// [K  -- Erases from the cursor to the end of the line, including the cursor position.
+	// [1K -- Erases from the beginning of the line to the cursor, including the cursor position.
+	// [2K -- Erases the complete line.
+
+	info, err := GetConsoleScreenBufferInfo(h.fd)
+	if err != nil {
+		return err
+	}
+
+	var start COORD
+	var end COORD
+
+	switch param {
+	case 0:
+		start = info.CursorPosition
+		end = COORD{info.Size.X, info.CursorPosition.Y}
+
+	case 1:
+		start = COORD{0, info.CursorPosition.Y}
+		end = info.CursorPosition
+
+	case 2:
+		start = COORD{0, info.CursorPosition.Y}
+		end = COORD{info.Size.X, info.CursorPosition.Y}
+	}
+
+	err = h.clearRange(h.attributes, start, end)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (h *windowsAnsiEventHandler) IL(param int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("IL: [%v]", strconv.Itoa(param))
+	h.clearWrap()
+	return h.insertLines(param)
+}
+
+func (h *windowsAnsiEventHandler) DL(param int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("DL: [%v]", strconv.Itoa(param))
+	h.clearWrap()
+	return h.deleteLines(param)
+}
+
+func (h *windowsAnsiEventHandler) ICH(param int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("ICH: [%v]", strconv.Itoa(param))
+	h.clearWrap()
+	return h.insertCharacters(param)
+}
+
+func (h *windowsAnsiEventHandler) DCH(param int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("DCH: [%v]", strconv.Itoa(param))
+	h.clearWrap()
+	return h.deleteCharacters(param)
+}
+
+func (h *windowsAnsiEventHandler) SGR(params []int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	strings := []string{}
+	for _, v := range params {
+		strings = append(strings, strconv.Itoa(v))
+	}
+
+	h.logf("SGR: [%v]", strings)
+
+	if len(params) <= 0 {
+		h.attributes = h.infoReset.Attributes
+		h.inverted = false
+	} else {
+		for _, attr := range params {
+
+			if attr == ansiterm.ANSI_SGR_RESET {
+				h.attributes = h.infoReset.Attributes
+				h.inverted = false
+				continue
+			}
+
+			h.attributes, h.inverted = collectAnsiIntoWindowsAttributes(h.attributes, h.inverted, h.infoReset.Attributes, int16(attr))
+		}
+	}
+
+	attributes := h.attributes
+	if h.inverted {
+		attributes = invertAttributes(attributes)
+	}
+	err := SetConsoleTextAttribute(h.fd, attributes)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (h *windowsAnsiEventHandler) SU(param int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("SU: [%v]", []string{strconv.Itoa(param)})
+	h.clearWrap()
+	return h.scrollUp(param)
+}
+
+func (h *windowsAnsiEventHandler) SD(param int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("SD: [%v]", []string{strconv.Itoa(param)})
+	h.clearWrap()
+	return h.scrollDown(param)
+}
+
+func (h *windowsAnsiEventHandler) DA(params []string) error {
+	h.logf("DA: [%v]", params)
+	// DA cannot be implemented because it must send data on the VT100 input stream,
+	// which is not available to go-ansiterm.
+	return nil
+}
+
+func (h *windowsAnsiEventHandler) DECSTBM(top int, bottom int) error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("DECSTBM: [%d, %d]", top, bottom)
+
+	// The console buffer is 0 indexed, while the DECSTBM parameters are 1 indexed
+	h.sr.top = int16(top - 1)
+	h.sr.bottom = int16(bottom - 1)
+
+	// This command also moves the cursor to the origin.
+	h.clearWrap()
+	return h.CUP(1, 1)
+}
+
+func (h *windowsAnsiEventHandler) RI() error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("RI: []")
+	h.clearWrap()
+
+	info, err := GetConsoleScreenBufferInfo(h.fd)
+	if err != nil {
+		return err
+	}
+
+	sr := h.effectiveSr(info.Window)
+	if info.CursorPosition.Y == sr.top {
+		return h.scrollDown(1)
+	}
+
+	return h.moveCursorVertical(-1)
+}
+
+func (h *windowsAnsiEventHandler) IND() error {
+	h.logf("IND: []")
+	return h.executeLF()
+}
+
+func (h *windowsAnsiEventHandler) Flush() error {
+	h.curInfo = nil
+	if h.buffer.Len() > 0 {
+		h.logf("Flush: [%s]", h.buffer.Bytes())
+		if _, err := h.buffer.WriteTo(h.file); err != nil {
+			return err
+		}
+	}
+
+	if h.wrapNext && !h.drewMarginByte {
+		h.logf("Flush: drawing margin byte '%c'", h.marginByte)
+
+		info, err := GetConsoleScreenBufferInfo(h.fd)
+		if err != nil {
+			return err
+		}
+
+		charInfo := []CHAR_INFO{{UnicodeChar: uint16(h.marginByte), Attributes: info.Attributes}}
+		size := COORD{1, 1}
+		position := COORD{0, 0}
+		region := SMALL_RECT{Left: info.CursorPosition.X, Top: info.CursorPosition.Y, Right: info.CursorPosition.X, Bottom: info.CursorPosition.Y}
+		if err := WriteConsoleOutput(h.fd, charInfo, size, position, &region); err != nil {
+			return err
+		}
+		h.drewMarginByte = true
+	}
+	return nil
+}
+
+// getCurrentInfo ensures that the current console screen information has been queried
+// since the last call to Flush(). It must be called before accessing h.curInfo or h.curPos.
+func (h *windowsAnsiEventHandler) getCurrentInfo() (COORD, *CONSOLE_SCREEN_BUFFER_INFO, error) {
+	if h.curInfo == nil {
+		info, err := GetConsoleScreenBufferInfo(h.fd)
+		if err != nil {
+			return COORD{}, nil, err
+		}
+		h.curInfo = info
+		h.curPos = info.CursorPosition
+	}
+	return h.curPos, h.curInfo, nil
+}
+
+func (h *windowsAnsiEventHandler) updatePos(pos COORD) {
+	if h.curInfo == nil {
+		panic("failed to call getCurrentInfo before calling updatePos")
+	}
+	h.curPos = pos
+}
+
+// clearWrap clears the state where the cursor is in the margin
+// waiting for the next character before wrapping the line. This must
+// be done before most operations that act on the cursor.
+func (h *windowsAnsiEventHandler) clearWrap() {
+	h.wrapNext = false
+	h.drewMarginByte = false
+}
diff --git a/vendor/github.com/MakeNowJust/heredoc/LICENSE b/vendor/github.com/MakeNowJust/heredoc/LICENSE
new file mode 100644
index 00000000..8a58c222
--- /dev/null
+++ b/vendor/github.com/MakeNowJust/heredoc/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014-2017 TSUYUSATO Kitsune
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/MakeNowJust/heredoc/README.md b/vendor/github.com/MakeNowJust/heredoc/README.md
new file mode 100644
index 00000000..a3a65fab
--- /dev/null
+++ b/vendor/github.com/MakeNowJust/heredoc/README.md
@@ -0,0 +1,53 @@
+# heredoc [![CircleCI](https://circleci.com/gh/MakeNowJust/heredoc.svg?style=svg)](https://circleci.com/gh/MakeNowJust/heredoc) [![Go Walker](http://gowalker.org/api/v1/badge)](https://gowalker.org/github.com/MakeNowJust/heredoc)
+
+## About
+
+Package heredoc provides here-documents with the leading indentation stripped.
+
+## Install
+
+```console
+$ go get github.com/MakeNowJust/heredoc
+```
+
+## Import
+
+```go
+// usual
+import "github.com/MakeNowJust/heredoc"
+// shortcuts
+import . "github.com/MakeNowJust/heredoc/dot"
+```
+
+## Example
+
+```go
+package main
+
+import (
+	"fmt"
+	. "github.com/MakeNowJust/heredoc/dot"
+)
+
+func main() {
+	fmt.Println(D(`
+		Lorem ipsum dolor sit amet, consectetur adipisicing elit,
+		sed do eiusmod tempor incididunt ut labore et dolore magna
+		aliqua. Ut enim ad minim veniam, ...
+	`))
+	// Output:
+	// Lorem ipsum dolor sit amet, consectetur adipisicing elit,
+	// sed do eiusmod tempor incididunt ut labore et dolore magna
+	// aliqua. Ut enim ad minim veniam, ...
+	//
+}
+```
+
+## API Document
+
+ - [Go Walker - github.com/MakeNowJust/heredoc](https://gowalker.org/github.com/MakeNowJust/heredoc)
+ - [Go Walker - github.com/MakeNowJust/heredoc/dot](https://gowalker.org/github.com/MakeNowJust/heredoc/dot)
+
+## License
+
+This software is released under the MIT License, see LICENSE.
diff --git a/vendor/github.com/MakeNowJust/heredoc/heredoc.go b/vendor/github.com/MakeNowJust/heredoc/heredoc.go
new file mode 100644
index 00000000..fea12e62
--- /dev/null
+++ b/vendor/github.com/MakeNowJust/heredoc/heredoc.go
@@ -0,0 +1,98 @@
+// Copyright (c) 2014-2017 TSUYUSATO Kitsune
+// This software is released under the MIT License.
+// http://opensource.org/licenses/mit-license.php
+
+// Package heredoc provides creation of here-documents from raw strings.
+//
+// Golang supports raw-string syntax.
+//     doc := `
+//     	Foo
+//     	Bar
+//     `
+// But raw strings cannot strip indentation, so the content above is an indented string, equivalent to
+//     "\n\tFoo\n\tBar\n"
+// I don't want this!
+//
+// However this problem is solved by package heredoc.
+//     doc := heredoc.Doc(`
+//     	Foo
+//     	Bar
+//     `)
+// Is equivalent to
+//     "Foo\nBar\n"
+package heredoc
+
+import (
+	"fmt"
+	"strings"
+	"unicode"
+)
+
+const maxInt = int(^uint(0) >> 1)
+
+// Doc returns the un-indented string as a here-document.
+func Doc(raw string) string {
+	skipFirstLine := false
+	if raw[0] == '\n' {
+		raw = raw[1:]
+	} else {
+		skipFirstLine = true
+	}
+
+	lines := strings.Split(raw, "\n")
+
+	minIndentSize := getMinIndent(lines, skipFirstLine)
+	lines = removeIndentation(lines, minIndentSize, skipFirstLine)
+
+	return strings.Join(lines, "\n")
+}
+
+// getMinIndent calculates the minimum indentation in lines, excluding empty lines.
+func getMinIndent(lines []string, skipFirstLine bool) int {
+	minIndentSize := maxInt
+
+	for i, line := range lines {
+		if i == 0 && skipFirstLine {
+			continue
+		}
+
+		indentSize := 0
+		for _, r := range []rune(line) {
+			if unicode.IsSpace(r) {
+				indentSize += 1
+			} else {
+				break
+			}
+		}
+
+		if len(line) == indentSize {
+			if i == len(lines)-1 && indentSize < minIndentSize {
+				lines[i] = ""
+			}
+		} else if indentSize < minIndentSize {
+			minIndentSize = indentSize
+		}
+	}
+	return minIndentSize
+}
+
+// removeIndentation removes n characters from the front of each line in lines.
+// Skips first line if skipFirstLine is true, skips empty lines.
+func removeIndentation(lines []string, n int, skipFirstLine bool) []string {
+	for i, line := range lines {
+		if i == 0 && skipFirstLine {
+			continue
+		}
+
+		if len(lines[i]) >= n {
+			lines[i] = line[n:]
+		}
+	}
+	return lines
+}
+
+// Docf returns the unindented and formatted string as a here-document.
+// Formatting is done as for fmt.Printf().
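+//
+// Illustrative example (not from the original source):
+//     heredoc.Docf(`
+//     	Hello, %s!
+//     `, "world")
+// returns "Hello, world!\n".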
+func Docf(raw string, args ...interface{}) string {
+	return fmt.Sprintf(Doc(raw), args...)
+}
diff --git a/vendor/github.com/PuerkitoBio/purell/.gitignore b/vendor/github.com/PuerkitoBio/purell/.gitignore
new file mode 100644
index 00000000..748e4c80
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/purell/.gitignore
@@ -0,0 +1,5 @@
+*.sublime-*
+.DS_Store
+*.swp
+*.swo
+tags
diff --git a/vendor/github.com/PuerkitoBio/purell/.travis.yml b/vendor/github.com/PuerkitoBio/purell/.travis.yml
new file mode 100644
index 00000000..cf31e6af
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/purell/.travis.yml
@@ -0,0 +1,12 @@
+language: go
+
+go:
+    - 1.4.x
+    - 1.5.x
+    - 1.6.x
+    - 1.7.x
+    - 1.8.x
+    - 1.9.x
+    - "1.10.x"
+    - "1.11.x"
+    - tip
diff --git a/vendor/github.com/PuerkitoBio/purell/LICENSE b/vendor/github.com/PuerkitoBio/purell/LICENSE
new file mode 100644
index 00000000..4b9986de
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/purell/LICENSE
@@ -0,0 +1,12 @@
+Copyright (c) 2012, Martin Angers
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+
+* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/PuerkitoBio/purell/README.md b/vendor/github.com/PuerkitoBio/purell/README.md
new file mode 100644
index 00000000..07de0c49
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/purell/README.md
@@ -0,0 +1,188 @@
+# Purell
+
+Purell is a tiny Go library to normalize URLs. It returns a pure URL. Pure-ell. Sanitizer and all. Yeah, I know...
+
+Based on the [Wikipedia article][wiki] and the [RFC 3986 document][rfc].
+
+[![build status](https://travis-ci.org/PuerkitoBio/purell.svg?branch=master)](http://travis-ci.org/PuerkitoBio/purell)
+
+## Install
+
+`go get github.com/PuerkitoBio/purell`
+
+## Changelog
+
+*    **v1.1.1** : Fix failing test due to Go1.12 changes (thanks to @ianlancetaylor).
+*    **2016-11-14 (v1.1.0)** : IDN: Conform to RFC 5895: Fold character width (thanks to @beeker1121).
+*    **2016-07-27 (v1.0.0)** : Normalize IDN to ASCII (thanks to @zenovich).
+*    **2015-02-08** : Add fix for relative paths issue ([PR #5][pr5]) and add fix for unnecessary encoding of reserved characters ([see issue #7][iss7]).
+*    **v0.2.0** : Add benchmarks, Attempt IDN support.
+*    **v0.1.0** : Initial release.
+
+## Examples
+
+From `example_test.go` (note that in your code, you would import "github.com/PuerkitoBio/purell", and would prefix references to its methods and constants with "purell."):
+
+```go
+package purell
+
+import (
+  "fmt"
+  "net/url"
+)
+
+func ExampleNormalizeURLString() {
+  if normalized, err := NormalizeURLString("hTTp://someWEBsite.com:80/Amazing%3f/url/",
+    FlagLowercaseScheme|FlagLowercaseHost|FlagUppercaseEscapes); err != nil {
+    panic(err)
+  } else {
+    fmt.Print(normalized)
+  }
+  // Output: http://somewebsite.com:80/Amazing%3F/url/
+}
+
+func ExampleMustNormalizeURLString() {
+  normalized := MustNormalizeURLString("hTTpS://someWEBsite.com:443/Amazing%fa/url/",
+    FlagsUnsafeGreedy)
+  fmt.Print(normalized)
+
+  // Output: http://somewebsite.com/Amazing%FA/url
+}
+
+func ExampleNormalizeURL() {
+  if u, err := url.Parse("Http://SomeUrl.com:8080/a/b/.././c///g?c=3&a=1&b=9&c=0#target"); err != nil {
+    panic(err)
+  } else {
+    normalized := NormalizeURL(u, FlagsUsuallySafeGreedy|FlagRemoveDuplicateSlashes|FlagRemoveFragment)
+    fmt.Print(normalized)
+  }
+
+  // Output: http://someurl.com:8080/a/c/g?c=3&a=1&b=9&c=0
+}
+```
+
+## API
+
+As seen in the examples above, purell offers three methods, `NormalizeURLString(string, NormalizationFlags) (string, error)`, `MustNormalizeURLString(string, NormalizationFlags) (string)` and `NormalizeURL(*url.URL, NormalizationFlags) (string)`. They all normalize the provided URL based on the specified flags. Here are the available flags:
+
+```go
+const (
+	// Safe normalizations
+	FlagLowercaseScheme           NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1
+	FlagLowercaseHost                                            // http://HOST -> http://host
+	FlagUppercaseEscapes                                         // http://host/t%ef -> http://host/t%EF
+	FlagDecodeUnnecessaryEscapes                                 // http://host/t%41 -> http://host/tA
+	FlagEncodeNecessaryEscapes                                   // http://host/!"#$ -> http://host/%21%22#$
+	FlagRemoveDefaultPort                                        // http://host:80 -> http://host
+	FlagRemoveEmptyQuerySeparator                                // http://host/path? -> http://host/path
+
+	// Usually safe normalizations
+	FlagRemoveTrailingSlash // http://host/path/ -> http://host/path
+	FlagAddTrailingSlash    // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags)
+	FlagRemoveDotSegments   // http://host/path/./a/b/../c -> http://host/path/a/c
+
+	// Unsafe normalizations
+	FlagRemoveDirectoryIndex   // http://host/path/index.html -> http://host/path/
+	FlagRemoveFragment         // http://host/path#fragment -> http://host/path
+	FlagForceHTTP              // https://host -> http://host
+	FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b
+	FlagRemoveWWW              // http://www.host/ -> http://host/
+	FlagAddWWW                 // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags)
+	FlagSortQuery              // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3
+
+	// Normalizations not in the wikipedia article, required to cover test cases
+	// submitted by jehiah
+	FlagDecodeDWORDHost           // http://1113982867 -> http://66.102.7.147
+	FlagDecodeOctalHost           // http://0102.0146.07.0223 -> http://66.102.7.147
+	FlagDecodeHexHost             // http://0x42660793 -> http://66.102.7.147
+	FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path
+	FlagRemoveEmptyPortSeparator  // http://host:/path -> http://host/path
+
+	// Convenience set of safe normalizations
+	FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator
+
+	// For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags,
+	// while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix".
+
+	// Convenience set of usually safe normalizations (includes FlagsSafe)
+	FlagsUsuallySafeGreedy    NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments
+	FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments
+
+	// Convenience set of unsafe normalizations (includes FlagsUsuallySafe)
+	FlagsUnsafeGreedy    NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery
+	FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery
+
+	// Convenience set of all available flags
+	FlagsAllGreedy    = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
+	FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
+)
+```
+
+For convenience, the set of flags `FlagsSafe`, `FlagsUsuallySafe[Greedy|NonGreedy]`, `FlagsUnsafe[Greedy|NonGreedy]` and `FlagsAll[Greedy|NonGreedy]` are provided for the similarly grouped normalizations on [wikipedia's URL normalization page][wiki]. You can add (using the bitwise OR `|` operator) or remove (using the bitwise AND NOT `&^` operator) individual flags from the sets if required, to build your own custom set.
+
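+For example (an illustrative snippet, not from the original README), a custom set that keeps the usually-safe normalizations but leaves trailing slashes alone could be built as:
+
+```go
+custom := purell.FlagsUsuallySafeGreedy &^ purell.FlagRemoveTrailingSlash
+normalized, err := purell.NormalizeURLString("HTTP://Example.com/a/./b/", custom)
+```
+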
+The [full godoc reference is available on gopkgdoc][godoc].
+
+Some things to note:
+
+*    `FlagDecodeUnnecessaryEscapes`, `FlagEncodeNecessaryEscapes`, `FlagUppercaseEscapes` and `FlagRemoveEmptyQuerySeparator` are always implicitly set, because internally, the URL string is parsed as an URL object, which automatically decodes unnecessary escapes, uppercases and encodes necessary ones, and removes empty query separators (an unnecessary `?` at the end of the url). So this operation cannot **not** be done. For this reason, `FlagRemoveEmptyQuerySeparator` (as well as the other three) has been included in the `FlagsSafe` convenience set, instead of `FlagsUnsafe`, where Wikipedia puts it.
+
+*    The `FlagDecodeUnnecessaryEscapes` decodes the following escapes (*from -> to*):
+    -    %24 -> $
+    -    %26 -> &
+    -    %2B-%3B -> +,-./0123456789:;
+    -    %3D -> =
+    -    %40-%5A -> @ABCDEFGHIJKLMNOPQRSTUVWXYZ
+    -    %5F -> _
+    -    %61-%7A -> abcdefghijklmnopqrstuvwxyz
+    -    %7E -> ~
+
+
+*    When the `NormalizeURL` function is used (passing an URL object), the source URL object is modified in place (that is, after the call it reflects the normalization).
+
+*    The *replace IP with domain name* normalization (`http://208.77.188.166/ → http://www.example.com/`) is obviously not possible for a library without making some network requests. This is not implemented in purell.
+
+*    The *remove unused query string parameters* and *remove default query parameters* are also not implemented, since this is a very case-specific normalization, and it is quite trivial to do with an URL object.
+
+### Safe vs Usually Safe vs Unsafe
+
+Purell allows you to control the level of risk you take while normalizing an URL. You can aggressively normalize, play it totally safe, or anything in between.
+
+Consider the following URL:
+
+`HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid`
+
+Normalizing with the `FlagsSafe` gives:
+
+`https://www.root.com/toto/tE%1F///a/./b/../c/?z=3&w=2&a=4&w=1#invalid`
+
+With the `FlagsUsuallySafeGreedy`:
+
+`https://www.root.com/toto/tE%1F///a/c?z=3&w=2&a=4&w=1#invalid`
+
+And with `FlagsUnsafeGreedy`:
+
+`http://root.com/toto/tE%1F/a/c?a=4&w=1&w=2&z=3`
+
+## TODOs
+
+*    Add a class/default instance to allow specifying custom directory index names? At the moment, removing directory index removes `(^|/)((?:default|index)\.\w{1,4})$`.
+
+## Thanks / Contributions
+
+@rogpeppe
+@jehiah
+@opennota
+@pchristopher1275
+@zenovich
+@beeker1121
+
+## License
+
+The [BSD 3-Clause license][bsd].
+
+[bsd]: http://opensource.org/licenses/BSD-3-Clause
+[wiki]: http://en.wikipedia.org/wiki/URL_normalization
+[rfc]: http://tools.ietf.org/html/rfc3986#section-6
+[godoc]: http://go.pkgdoc.org/github.com/PuerkitoBio/purell
+[pr5]: https://github.com/PuerkitoBio/purell/pull/5
+[iss7]: https://github.com/PuerkitoBio/purell/issues/7
diff --git a/vendor/github.com/PuerkitoBio/purell/purell.go b/vendor/github.com/PuerkitoBio/purell/purell.go
new file mode 100644
index 00000000..6d0fc190
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/purell/purell.go
@@ -0,0 +1,379 @@
+/*
+Package purell offers URL normalization as described on the wikipedia page:
+http://en.wikipedia.org/wiki/URL_normalization
+*/
+package purell
+
+import (
+	"bytes"
+	"fmt"
+	"net/url"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+
+	"github.com/PuerkitoBio/urlesc"
+	"golang.org/x/net/idna"
+	"golang.org/x/text/unicode/norm"
+	"golang.org/x/text/width"
+)
+
+// A set of normalization flags determines how a URL will
+// be normalized.
+type NormalizationFlags uint
+
+const (
+	// Safe normalizations
+	FlagLowercaseScheme           NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1
+	FlagLowercaseHost                                            // http://HOST -> http://host
+	FlagUppercaseEscapes                                         // http://host/t%ef -> http://host/t%EF
+	FlagDecodeUnnecessaryEscapes                                 // http://host/t%41 -> http://host/tA
+	FlagEncodeNecessaryEscapes                                   // http://host/!"#$ -> http://host/%21%22#$
+	FlagRemoveDefaultPort                                        // http://host:80 -> http://host
+	FlagRemoveEmptyQuerySeparator                                // http://host/path? -> http://host/path
+
+	// Usually safe normalizations
+	FlagRemoveTrailingSlash // http://host/path/ -> http://host/path
+	FlagAddTrailingSlash    // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags)
+	FlagRemoveDotSegments   // http://host/path/./a/b/../c -> http://host/path/a/c
+
+	// Unsafe normalizations
+	FlagRemoveDirectoryIndex   // http://host/path/index.html -> http://host/path/
+	FlagRemoveFragment         // http://host/path#fragment -> http://host/path
+	FlagForceHTTP              // https://host -> http://host
+	FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b
+	FlagRemoveWWW              // http://www.host/ -> http://host/
+	FlagAddWWW                 // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags)
+	FlagSortQuery              // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3
+
+	// Normalizations not in the wikipedia article, required to cover test cases
+	// submitted by jehiah
+	FlagDecodeDWORDHost           // http://1113982867 -> http://66.102.7.147
+	FlagDecodeOctalHost           // http://0102.0146.07.0223 -> http://66.102.7.147
+	FlagDecodeHexHost             // http://0x42660793 -> http://66.102.7.147
+	FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path
+	FlagRemoveEmptyPortSeparator  // http://host:/path -> http://host/path
+
+	// Convenience set of safe normalizations
+	FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator
+
+	// For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags,
+	// while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix".
+
+	// Convenience set of usually safe normalizations (includes FlagsSafe)
+	FlagsUsuallySafeGreedy    NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments
+	FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments
+
+	// Convenience set of unsafe normalizations (includes FlagsUsuallySafe)
+	FlagsUnsafeGreedy    NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery
+	FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery
+
+	// Convenience set of all available flags
+	FlagsAllGreedy    = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
+	FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
+)
+
+const (
+	defaultHttpPort  = ":80"
+	defaultHttpsPort = ":443"
+)
+
+// Regular expressions used by the normalizations
+var rxPort = regexp.MustCompile(`(:\d+)/?$`)
+var rxDirIndex = regexp.MustCompile(`(^|/)((?:default|index)\.\w{1,4})$`)
+var rxDupSlashes = regexp.MustCompile(`/{2,}`)
+var rxDWORDHost = regexp.MustCompile(`^(\d+)((?:\.+)?(?:\:\d*)?)$`)
+var rxOctalHost = regexp.MustCompile(`^(0\d*)\.(0\d*)\.(0\d*)\.(0\d*)((?:\.+)?(?:\:\d*)?)$`)
+var rxHexHost = regexp.MustCompile(`^0x([0-9A-Fa-f]+)((?:\.+)?(?:\:\d*)?)$`)
+var rxHostDots = regexp.MustCompile(`^(.+?)(:\d+)?$`)
+var rxEmptyPort = regexp.MustCompile(`:+$`)
+
+// Map of flags to implementation function.
+// FlagDecodeUnnecessaryEscapes has no action, since it is done automatically
+// by parsing the string as an URL. Same for FlagUppercaseEscapes and FlagRemoveEmptyQuerySeparator.
+
+// Since maps have undefined traversing order, make a slice of ordered keys
+var flagsOrder = []NormalizationFlags{
+	FlagLowercaseScheme,
+	FlagLowercaseHost,
+	FlagRemoveDefaultPort,
+	FlagRemoveDirectoryIndex,
+	FlagRemoveDotSegments,
+	FlagRemoveFragment,
+	FlagForceHTTP, // Must be after remove default port (because https=443/http=80)
+	FlagRemoveDuplicateSlashes,
+	FlagRemoveWWW,
+	FlagAddWWW,
+	FlagSortQuery,
+	FlagDecodeDWORDHost,
+	FlagDecodeOctalHost,
+	FlagDecodeHexHost,
+	FlagRemoveUnnecessaryHostDots,
+	FlagRemoveEmptyPortSeparator,
+	FlagRemoveTrailingSlash, // These two (add/remove trailing slash) must be last
+	FlagAddTrailingSlash,
+}
+
+// ... and then the map, where order is unimportant
+var flags = map[NormalizationFlags]func(*url.URL){
+	FlagLowercaseScheme:           lowercaseScheme,
+	FlagLowercaseHost:             lowercaseHost,
+	FlagRemoveDefaultPort:         removeDefaultPort,
+	FlagRemoveDirectoryIndex:      removeDirectoryIndex,
+	FlagRemoveDotSegments:         removeDotSegments,
+	FlagRemoveFragment:            removeFragment,
+	FlagForceHTTP:                 forceHTTP,
+	FlagRemoveDuplicateSlashes:    removeDuplicateSlashes,
+	FlagRemoveWWW:                 removeWWW,
+	FlagAddWWW:                    addWWW,
+	FlagSortQuery:                 sortQuery,
+	FlagDecodeDWORDHost:           decodeDWORDHost,
+	FlagDecodeOctalHost:           decodeOctalHost,
+	FlagDecodeHexHost:             decodeHexHost,
+	FlagRemoveUnnecessaryHostDots: removeUnncessaryHostDots,
+	FlagRemoveEmptyPortSeparator:  removeEmptyPortSeparator,
+	FlagRemoveTrailingSlash:       removeTrailingSlash,
+	FlagAddTrailingSlash:          addTrailingSlash,
+}
+
+// MustNormalizeURLString returns the normalized string, and panics if an error occurs.
+// It takes an URL string as input, as well as the normalization flags.
+func MustNormalizeURLString(u string, f NormalizationFlags) string {
+	result, e := NormalizeURLString(u, f)
+	if e != nil {
+		panic(e)
+	}
+	return result
+}
+
+// NormalizeURLString returns the normalized string, or an error if it can't be parsed into an URL object.
+// It takes an URL string as input, as well as the normalization flags.
+func NormalizeURLString(u string, f NormalizationFlags) (string, error) {
+	parsed, err := url.Parse(u)
+	if err != nil {
+		return "", err
+	}
+
+	if f&FlagLowercaseHost == FlagLowercaseHost {
+		parsed.Host = strings.ToLower(parsed.Host)
+	}
+
+	// The idna package doesn't fully conform to RFC 5895
+	// (https://tools.ietf.org/html/rfc5895), so we do it here.
+	// Taken from Go 1.8 cycle source, courtesy of bradfitz.
+	// TODO: Remove when (if?) idna package conforms to RFC 5895.
+	parsed.Host = width.Fold.String(parsed.Host)
+	parsed.Host = norm.NFC.String(parsed.Host)
+	if parsed.Host, err = idna.ToASCII(parsed.Host); err != nil {
+		return "", err
+	}
+
+	return NormalizeURL(parsed, f), nil
+}
+
+// NormalizeURL returns the normalized string.
+// It takes a parsed URL object as input, as well as the normalization flags.
+func NormalizeURL(u *url.URL, f NormalizationFlags) string {
+	for _, k := range flagsOrder {
+		if f&k == k {
+			flags[k](u)
+		}
+	}
+	return urlesc.Escape(u)
+}
+
+func lowercaseScheme(u *url.URL) {
+	if len(u.Scheme) > 0 {
+		u.Scheme = strings.ToLower(u.Scheme)
+	}
+}
+
+func lowercaseHost(u *url.URL) {
+	if len(u.Host) > 0 {
+		u.Host = strings.ToLower(u.Host)
+	}
+}
+
+func removeDefaultPort(u *url.URL) {
+	if len(u.Host) > 0 {
+		scheme := strings.ToLower(u.Scheme)
+		u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string {
+			if (scheme == "http" && val == defaultHttpPort) || (scheme == "https" && val == defaultHttpsPort) {
+				return ""
+			}
+			return val
+		})
+	}
+}
+
+func removeTrailingSlash(u *url.URL) {
+	if l := len(u.Path); l > 0 {
+		if strings.HasSuffix(u.Path, "/") {
+			u.Path = u.Path[:l-1]
+		}
+	} else if l = len(u.Host); l > 0 {
+		if strings.HasSuffix(u.Host, "/") {
+			u.Host = u.Host[:l-1]
+		}
+	}
+}
+
+func addTrailingSlash(u *url.URL) {
+	if l := len(u.Path); l > 0 {
+		if !strings.HasSuffix(u.Path, "/") {
+			u.Path += "/"
+		}
+	} else if l = len(u.Host); l > 0 {
+		if !strings.HasSuffix(u.Host, "/") {
+			u.Host += "/"
+		}
+	}
+}
+
+func removeDotSegments(u *url.URL) {
+	if len(u.Path) > 0 {
+		var dotFree []string
+		var lastIsDot bool
+
+		sections := strings.Split(u.Path, "/")
+		for _, s := range sections {
+			if s == ".." {
+				if len(dotFree) > 0 {
+					dotFree = dotFree[:len(dotFree)-1]
+				}
+			} else if s != "." {
+				dotFree = append(dotFree, s)
+			}
+			lastIsDot = (s == "." || s == "..")
+		}
+		// Special case if host does not end with / and new path does not begin with /
+		u.Path = strings.Join(dotFree, "/")
+		if u.Host != "" && !strings.HasSuffix(u.Host, "/") && !strings.HasPrefix(u.Path, "/") {
+			u.Path = "/" + u.Path
+		}
+		// Special case if the last segment was a dot, make sure the path ends with a slash
+		if lastIsDot && !strings.HasSuffix(u.Path, "/") {
+			u.Path += "/"
+		}
+	}
+}
+
+func removeDirectoryIndex(u *url.URL) {
+	if len(u.Path) > 0 {
+		u.Path = rxDirIndex.ReplaceAllString(u.Path, "$1")
+	}
+}
+
+func removeFragment(u *url.URL) {
+	u.Fragment = ""
+}
+
+func forceHTTP(u *url.URL) {
+	if strings.ToLower(u.Scheme) == "https" {
+		u.Scheme = "http"
+	}
+}
+
+func removeDuplicateSlashes(u *url.URL) {
+	if len(u.Path) > 0 {
+		u.Path = rxDupSlashes.ReplaceAllString(u.Path, "/")
+	}
+}
+
+func removeWWW(u *url.URL) {
+	if len(u.Host) > 0 && strings.HasPrefix(strings.ToLower(u.Host), "www.") {
+		u.Host = u.Host[4:]
+	}
+}
+
+func addWWW(u *url.URL) {
+	if len(u.Host) > 0 && !strings.HasPrefix(strings.ToLower(u.Host), "www.") {
+		u.Host = "www." + u.Host
+	}
+}
+
+func sortQuery(u *url.URL) {
+	q := u.Query()
+
+	if len(q) > 0 {
+		arKeys := make([]string, len(q))
+		i := 0
+		for k := range q {
+			arKeys[i] = k
+			i++
+		}
+		sort.Strings(arKeys)
+		buf := new(bytes.Buffer)
+		for _, k := range arKeys {
+			sort.Strings(q[k])
+			for _, v := range q[k] {
+				if buf.Len() > 0 {
+					buf.WriteRune('&')
+				}
+				buf.WriteString(fmt.Sprintf("%s=%s", k, urlesc.QueryEscape(v)))
+			}
+		}
+
+		// Rebuild the raw query string
+		u.RawQuery = buf.String()
+	}
+}
+
+func decodeDWORDHost(u *url.URL) {
+	if len(u.Host) > 0 {
+		if matches := rxDWORDHost.FindStringSubmatch(u.Host); len(matches) > 2 {
+			var parts [4]int64
+
+			dword, _ := strconv.ParseInt(matches[1], 10, 0)
+			for i, shift := range []uint{24, 16, 8, 0} {
+				parts[i] = dword >> shift & 0xFF
+			}
+			u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[2])
+		}
+	}
+}
+
+func decodeOctalHost(u *url.URL) {
+	if len(u.Host) > 0 {
+		if matches := rxOctalHost.FindStringSubmatch(u.Host); len(matches) > 5 {
+			var parts [4]int64
+
+			for i := 1; i <= 4; i++ {
+				parts[i-1], _ = strconv.ParseInt(matches[i], 8, 0)
+			}
+			u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[5])
+		}
+	}
+}
+
+func decodeHexHost(u *url.URL) {
+	if len(u.Host) > 0 {
+		if matches := rxHexHost.FindStringSubmatch(u.Host); len(matches) > 2 {
+			// Conversion is safe because of regex validation
+			parsed, _ := strconv.ParseInt(matches[1], 16, 0)
+			// Set host as DWORD (base 10) encoded host
+			u.Host = fmt.Sprintf("%d%s", parsed, matches[2])
+			// The rest is the same as decoding a DWORD host
+			decodeDWORDHost(u)
+		}
+	}
+}
+
+func removeUnncessaryHostDots(u *url.URL) {
+	if len(u.Host) > 0 {
+		if matches := rxHostDots.FindStringSubmatch(u.Host); len(matches) > 1 {
+			// Trim the leading and trailing dots
+			u.Host = strings.Trim(matches[1], ".")
+			if len(matches) > 2 {
+				u.Host += matches[2]
+			}
+		}
+	}
+}
+
+func removeEmptyPortSeparator(u *url.URL) {
+	if len(u.Host) > 0 {
+		u.Host = rxEmptyPort.ReplaceAllString(u.Host, "")
+	}
+}
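
A minimal usage sketch of the vendored purell API above (illustrative only, not part of the vendored file; assumes a standalone program importing the package by its canonical path). NormalizeURL can be used instead when a *url.URL is already at hand.

    package main

    import (
        "fmt"

        "github.com/PuerkitoBio/purell"
    )

    func main() {
        // FlagsUsuallySafeGreedy lowercases the scheme and host, strips the
        // default port, resolves dot segments and removes the trailing slash.
        out, err := purell.NormalizeURLString(
            "HTTP://Example.COM:80/a/./b/../c/",
            purell.FlagsUsuallySafeGreedy|purell.FlagSortQuery,
        )
        if err != nil {
            panic(err) // the input could not be parsed as a URL
        }
        fmt.Println(out) // http://example.com/a/c
    }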
diff --git a/vendor/github.com/PuerkitoBio/urlesc/.travis.yml b/vendor/github.com/PuerkitoBio/urlesc/.travis.yml
new file mode 100644
index 00000000..ba6b225f
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/urlesc/.travis.yml
@@ -0,0 +1,15 @@
+language: go
+
+go:
+  - 1.4.x
+  - 1.5.x
+  - 1.6.x
+  - 1.7.x
+  - 1.8.x
+  - tip
+
+install:
+  - go build .
+
+script:
+  - go test -v
diff --git a/vendor/github.com/PuerkitoBio/urlesc/LICENSE b/vendor/github.com/PuerkitoBio/urlesc/LICENSE
new file mode 100644
index 00000000..74487567
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/urlesc/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/PuerkitoBio/urlesc/README.md b/vendor/github.com/PuerkitoBio/urlesc/README.md
new file mode 100644
index 00000000..57aff0a5
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/urlesc/README.md
@@ -0,0 +1,16 @@
+urlesc [![Build Status](https://travis-ci.org/PuerkitoBio/urlesc.svg?branch=master)](https://travis-ci.org/PuerkitoBio/urlesc) [![GoDoc](http://godoc.org/github.com/PuerkitoBio/urlesc?status.svg)](http://godoc.org/github.com/PuerkitoBio/urlesc)
+======
+
+Package urlesc implements query escaping as per RFC 3986.
+
+It contains some parts of the net/url package, modified so as to allow
+some reserved characters incorrectly escaped by net/url (see [issue 5684](https://github.com/golang/go/issues/5684)).
+
+## Install
+
+    go get github.com/PuerkitoBio/urlesc
+
+## License
+
+Go license (BSD-3-Clause)
+
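As a quick sketch of the query-escaping entry point (illustrative, not part of the vendored README):

    package main

    import (
        "fmt"

        "github.com/PuerkitoBio/urlesc"
    )

    func main() {
        // Spaces become '+' and reserved query characters are percent-encoded.
        fmt.Println(urlesc.QueryEscape("a b&c")) // a+b%26c
    }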
diff --git a/vendor/github.com/PuerkitoBio/urlesc/urlesc.go b/vendor/github.com/PuerkitoBio/urlesc/urlesc.go
new file mode 100644
index 00000000..1b846245
--- /dev/null
+++ b/vendor/github.com/PuerkitoBio/urlesc/urlesc.go
@@ -0,0 +1,180 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package urlesc implements query escaping as per RFC 3986.
+// It contains some parts of the net/url package, modified so as to allow
+// some reserved characters incorrectly escaped by net/url.
+// See https://github.com/golang/go/issues/5684
+package urlesc
+
+import (
+	"bytes"
+	"net/url"
+	"strings"
+)
+
+type encoding int
+
+const (
+	encodePath encoding = 1 + iota
+	encodeUserPassword
+	encodeQueryComponent
+	encodeFragment
+)
+
+// Return true if the specified character should be escaped when
+// appearing in a URL string, according to RFC 3986.
+func shouldEscape(c byte, mode encoding) bool {
+	// §2.3 Unreserved characters (alphanum)
+	if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' {
+		return false
+	}
+
+	switch c {
+	case '-', '.', '_', '~': // §2.3 Unreserved characters (mark)
+		return false
+
+	// §2.2 Reserved characters (reserved)
+	case ':', '/', '?', '#', '[', ']', '@', // gen-delims
+		'!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // sub-delims
+		// Different sections of the URL allow a few of
+		// the reserved characters to appear unescaped.
+		switch mode {
+		case encodePath: // §3.3
+			// The RFC allows sub-delims and : @.
+			// '/', '[' and ']' can be used to assign meaning to individual path
+			// segments.  This package only manipulates the path as a whole,
+			// so we allow those as well.  That leaves only ? and # to escape.
+			return c == '?' || c == '#'
+
+		case encodeUserPassword: // §3.2.1
+			// The RFC allows : and sub-delims in
+			// userinfo.  The parsing of userinfo treats ':' as special so we must escape
+			// all the gen-delims.
+			return c == ':' || c == '/' || c == '?' || c == '#' || c == '[' || c == ']' || c == '@'
+
+		case encodeQueryComponent: // §3.4
+			// The RFC allows / and ?.
+			return c != '/' && c != '?'
+
+		case encodeFragment: // §4.1
+			// The RFC text is silent but the grammar allows
+			// everything, so escape nothing but #
+			return c == '#'
+		}
+	}
+
+	// Everything else must be escaped.
+	return true
+}
+
+// QueryEscape escapes the string so it can be safely placed
+// inside a URL query.
+func QueryEscape(s string) string {
+	return escape(s, encodeQueryComponent)
+}
+
+func escape(s string, mode encoding) string {
+	spaceCount, hexCount := 0, 0
+	for i := 0; i < len(s); i++ {
+		c := s[i]
+		if shouldEscape(c, mode) {
+			if c == ' ' && mode == encodeQueryComponent {
+				spaceCount++
+			} else {
+				hexCount++
+			}
+		}
+	}
+
+	if spaceCount == 0 && hexCount == 0 {
+		return s
+	}
+
+	t := make([]byte, len(s)+2*hexCount)
+	j := 0
+	for i := 0; i < len(s); i++ {
+		switch c := s[i]; {
+		case c == ' ' && mode == encodeQueryComponent:
+			t[j] = '+'
+			j++
+		case shouldEscape(c, mode):
+			t[j] = '%'
+			t[j+1] = "0123456789ABCDEF"[c>>4]
+			t[j+2] = "0123456789ABCDEF"[c&15]
+			j += 3
+		default:
+			t[j] = s[i]
+			j++
+		}
+	}
+	return string(t)
+}
+
+var uiReplacer = strings.NewReplacer(
+	"%21", "!",
+	"%27", "'",
+	"%28", "(",
+	"%29", ")",
+	"%2A", "*",
+)
+
+// unescapeUserinfo unescapes some characters that need not be escaped, per RFC 3986.
+func unescapeUserinfo(s string) string {
+	return uiReplacer.Replace(s)
+}
+
+// Escape reassembles the URL into a valid URL string.
+// The general form of the result is one of:
+//
+//	scheme:opaque
+//	scheme://userinfo@host/path?query#fragment
+//
+// If u.Opaque is non-empty, Escape uses the first form;
+// otherwise it uses the second form.
+//
+// In the second form, the following rules apply:
+//	- if u.Scheme is empty, scheme: is omitted.
+//	- if u.User is nil, userinfo@ is omitted.
+//	- if u.Host is empty, host/ is omitted.
+//	- if u.Scheme and u.Host are empty and u.User is nil,
+//	   the entire scheme://userinfo@host/ is omitted.
+//	- if u.Host is non-empty and u.Path begins with a /,
+//	   the form host/path does not add its own /.
+//	- if u.RawQuery is empty, ?query is omitted.
+//	- if u.Fragment is empty, #fragment is omitted.
+func Escape(u *url.URL) string {
+	var buf bytes.Buffer
+	if u.Scheme != "" {
+		buf.WriteString(u.Scheme)
+		buf.WriteByte(':')
+	}
+	if u.Opaque != "" {
+		buf.WriteString(u.Opaque)
+	} else {
+		if u.Scheme != "" || u.Host != "" || u.User != nil {
+			buf.WriteString("//")
+			if ui := u.User; ui != nil {
+				buf.WriteString(unescapeUserinfo(ui.String()))
+				buf.WriteByte('@')
+			}
+			if h := u.Host; h != "" {
+				buf.WriteString(h)
+			}
+		}
+		if u.Path != "" && u.Path[0] != '/' && u.Host != "" {
+			buf.WriteByte('/')
+		}
+		buf.WriteString(escape(u.Path, encodePath))
+	}
+	if u.RawQuery != "" {
+		buf.WriteByte('?')
+		buf.WriteString(u.RawQuery)
+	}
+	if u.Fragment != "" {
+		buf.WriteByte('#')
+		buf.WriteString(escape(u.Fragment, encodeFragment))
+	}
+	return buf.String()
+}
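
A hedged sketch of Escape on a parsed URL (not part of the vendored file); per the rules above, sub-delims such as ',' and ';' in the path are left unescaped:

    package main

    import (
        "fmt"
        "net/url"

        "github.com/PuerkitoBio/urlesc"
    )

    func main() {
        u, err := url.Parse("http://user:pass@example.com/a,b;c?q=1#frag")
        if err != nil {
            panic(err)
        }
        // The path sub-delims survive; userinfo, query and fragment are kept.
        fmt.Println(urlesc.Escape(u)) // http://user:pass@example.com/a,b;c?q=1#frag
    }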
diff --git a/vendor/github.com/docker/docker/pkg/term/ascii.go b/vendor/github.com/docker/docker/pkg/term/ascii.go
new file mode 100644
index 00000000..87bca8d4
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/ascii.go
@@ -0,0 +1,66 @@
+package term // import "github.com/docker/docker/pkg/term"
+
+import (
+	"fmt"
+	"strings"
+)
+
+// ASCII lists the supported ASCII key sequences.
+var ASCII = []string{
+	"ctrl-@",
+	"ctrl-a",
+	"ctrl-b",
+	"ctrl-c",
+	"ctrl-d",
+	"ctrl-e",
+	"ctrl-f",
+	"ctrl-g",
+	"ctrl-h",
+	"ctrl-i",
+	"ctrl-j",
+	"ctrl-k",
+	"ctrl-l",
+	"ctrl-m",
+	"ctrl-n",
+	"ctrl-o",
+	"ctrl-p",
+	"ctrl-q",
+	"ctrl-r",
+	"ctrl-s",
+	"ctrl-t",
+	"ctrl-u",
+	"ctrl-v",
+	"ctrl-w",
+	"ctrl-x",
+	"ctrl-y",
+	"ctrl-z",
+	"ctrl-[",
+	"ctrl-\\",
+	"ctrl-]",
+	"ctrl-^",
+	"ctrl-_",
+}
+
+// ToBytes converts a comma-separated string of key sequences to the corresponding ASCII codes.
+func ToBytes(keys string) ([]byte, error) {
+	codes := []byte{}
+next:
+	for _, key := range strings.Split(keys, ",") {
+		if len(key) != 1 {
+			for code, ctrl := range ASCII {
+				if ctrl == key {
+					codes = append(codes, byte(code))
+					continue next
+				}
+			}
+			if key == "DEL" {
+				codes = append(codes, 127)
+			} else {
+				return nil, fmt.Errorf("Unknown character: '%s'", key)
+			}
+		} else {
+			codes = append(codes, key[0])
+		}
+	}
+	return codes, nil
+}
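
A small sketch of ToBytes (illustrative; "ctrl-p,ctrl-q" is Docker's default detach sequence):

    package main

    import (
        "fmt"

        "github.com/docker/docker/pkg/term"
    )

    func main() {
        // Each comma-separated token is looked up in the ASCII table above,
        // or taken literally when it is a single character.
        keys, err := term.ToBytes("ctrl-p,ctrl-q")
        if err != nil {
            panic(err)
        }
        fmt.Printf("% x\n", keys) // 10 11
    }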
diff --git a/vendor/github.com/docker/docker/pkg/term/proxy.go b/vendor/github.com/docker/docker/pkg/term/proxy.go
new file mode 100644
index 00000000..da733e58
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/proxy.go
@@ -0,0 +1,78 @@
+package term // import "github.com/docker/docker/pkg/term"
+
+import (
+	"io"
+)
+
+// EscapeError is a special error returned by a TTY proxy reader's Read()
+// method when its detach escape sequence is read.
+type EscapeError struct{}
+
+func (EscapeError) Error() string {
+	return "read escape sequence"
+}
+
+// escapeProxy is used only for attaches with a TTY. It is used to proxy
+// stdin keypresses from the underlying reader and look for the passed in
+// escape key sequence to signal a detach.
+type escapeProxy struct {
+	escapeKeys   []byte
+	escapeKeyPos int
+	r            io.Reader
+}
+
+// NewEscapeProxy returns a new TTY proxy reader which wraps the given reader
+// and detects when the specified escape keys are read, in which case the Read
+// method will return an error of type EscapeError.
+func NewEscapeProxy(r io.Reader, escapeKeys []byte) io.Reader {
+	return &escapeProxy{
+		escapeKeys: escapeKeys,
+		r:          r,
+	}
+}
+
+func (r *escapeProxy) Read(buf []byte) (int, error) {
+	nr, err := r.r.Read(buf)
+
+	if len(r.escapeKeys) == 0 {
+		return nr, err
+	}
+
+	preserve := func() {
+		// this preserves the original key presses in the passed in buffer
+		nr += r.escapeKeyPos
+		preserve := make([]byte, 0, r.escapeKeyPos+len(buf))
+		preserve = append(preserve, r.escapeKeys[:r.escapeKeyPos]...)
+		preserve = append(preserve, buf...)
+		r.escapeKeyPos = 0
+		copy(buf[0:nr], preserve)
+	}
+
+	if nr != 1 || err != nil {
+		if r.escapeKeyPos > 0 {
+			preserve()
+		}
+		return nr, err
+	}
+
+	if buf[0] != r.escapeKeys[r.escapeKeyPos] {
+		if r.escapeKeyPos > 0 {
+			preserve()
+		}
+		return nr, nil
+	}
+
+	if r.escapeKeyPos == len(r.escapeKeys)-1 {
+		return 0, EscapeError{}
+	}
+
+	// Looks like we've got an escape key, but we need to match again on the next
+	// read.
+	// Store the current escape key we found so we can look for the next one on
+	// the next read.
+	// Since this is an escape key, make sure we don't let the caller read it
+	// If later on we find that this is not the escape sequence, we'll add the
+	// keys back
+	r.escapeKeyPos++
+	return nr - r.escapeKeyPos, nil
+}
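
Roughly how a caller wires the proxy up (a sketch with assumed plumbing, not taken from this patch):

    package main

    import (
        "fmt"
        "os"

        "github.com/docker/docker/pkg/term"
    )

    func main() {
        keys, _ := term.ToBytes("ctrl-p,ctrl-q")
        in := term.NewEscapeProxy(os.Stdin, keys)

        buf := make([]byte, 1024)
        for {
            n, err := in.Read(buf)
            if _, ok := err.(term.EscapeError); ok {
                fmt.Println("detach sequence read")
                return
            }
            if err != nil {
                return // EOF or a real read error
            }
            os.Stdout.Write(buf[:n])
        }
    }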
diff --git a/vendor/github.com/docker/docker/pkg/term/tc.go b/vendor/github.com/docker/docker/pkg/term/tc.go
new file mode 100644
index 00000000..01bcaa8a
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/tc.go
@@ -0,0 +1,20 @@
+// +build !windows
+
+package term // import "github.com/docker/docker/pkg/term"
+
+import (
+	"syscall"
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+)
+
+func tcget(fd uintptr, p *Termios) syscall.Errno {
+	_, _, err := unix.Syscall(unix.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(p)))
+	return err
+}
+
+func tcset(fd uintptr, p *Termios) syscall.Errno {
+	_, _, err := unix.Syscall(unix.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(p)))
+	return err
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/term.go b/vendor/github.com/docker/docker/pkg/term/term.go
new file mode 100644
index 00000000..0589a955
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/term.go
@@ -0,0 +1,124 @@
+// +build !windows
+
+// Package term provides structures and helper functions to work with
+// the terminal (state, sizes).
+package term // import "github.com/docker/docker/pkg/term"
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"os/signal"
+
+	"golang.org/x/sys/unix"
+)
+
+var (
+	// ErrInvalidState is returned if the state of the terminal is invalid.
+	ErrInvalidState = errors.New("Invalid terminal state")
+)
+
+// State represents the state of the terminal.
+type State struct {
+	termios Termios
+}
+
+// Winsize represents the size of the terminal window.
+type Winsize struct {
+	Height uint16
+	Width  uint16
+	x      uint16
+	y      uint16
+}
+
+// StdStreams returns the standard streams (stdin, stdout, stderr).
+func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
+	return os.Stdin, os.Stdout, os.Stderr
+}
+
+// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal.
+func GetFdInfo(in interface{}) (uintptr, bool) {
+	var inFd uintptr
+	var isTerminalIn bool
+	if file, ok := in.(*os.File); ok {
+		inFd = file.Fd()
+		isTerminalIn = IsTerminal(inFd)
+	}
+	return inFd, isTerminalIn
+}
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+func IsTerminal(fd uintptr) bool {
+	var termios Termios
+	return tcget(fd, &termios) == 0
+}
+
+// RestoreTerminal restores the terminal connected to the given file descriptor
+// to a previous state.
+func RestoreTerminal(fd uintptr, state *State) error {
+	if state == nil {
+		return ErrInvalidState
+	}
+	if err := tcset(fd, &state.termios); err != 0 {
+		return err
+	}
+	return nil
+}
+
+// SaveState saves the state of the terminal connected to the given file descriptor.
+func SaveState(fd uintptr) (*State, error) {
+	var oldState State
+	if err := tcget(fd, &oldState.termios); err != 0 {
+		return nil, err
+	}
+
+	return &oldState, nil
+}
+
+// DisableEcho applies the specified state to the terminal connected to the file
+// descriptor, with echo disabled.
+func DisableEcho(fd uintptr, state *State) error {
+	newState := state.termios
+	newState.Lflag &^= unix.ECHO
+
+	if err := tcset(fd, &newState); err != 0 {
+		return err
+	}
+	handleInterrupt(fd, state)
+	return nil
+}
+
+// SetRawTerminal puts the terminal connected to the given file descriptor into
+// raw mode and returns the previous state. On UNIX, this puts both the input
+// and output into raw mode. On Windows, it only puts the input into raw mode.
+func SetRawTerminal(fd uintptr) (*State, error) {
+	oldState, err := MakeRaw(fd)
+	if err != nil {
+		return nil, err
+	}
+	handleInterrupt(fd, oldState)
+	return oldState, err
+}
+
+// SetRawTerminalOutput puts the output of terminal connected to the given file
+// descriptor into raw mode. On UNIX, this does nothing and returns nil for the
+// state. On Windows, it disables LF -> CRLF translation.
+func SetRawTerminalOutput(fd uintptr) (*State, error) {
+	return nil, nil
+}
+
+func handleInterrupt(fd uintptr, state *State) {
+	sigchan := make(chan os.Signal, 1)
+	signal.Notify(sigchan, os.Interrupt)
+	go func() {
+		for range sigchan {
+			// quit cleanly so the new terminal prompt starts on a new line
+			fmt.Println()
+			signal.Stop(sigchan)
+			close(sigchan)
+			RestoreTerminal(fd, state)
+			os.Exit(1)
+		}
+	}()
+}
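
A sketch of the typical raw-mode call pattern on Unix (illustrative; the single-byte read is just to keep the example finite):

    package main

    import (
        "fmt"
        "os"

        "github.com/docker/docker/pkg/term"
    )

    func main() {
        fd, isTerm := term.GetFdInfo(os.Stdin)
        if !isTerm {
            fmt.Fprintln(os.Stderr, "stdin is not a terminal")
            return
        }

        // Enter raw mode and restore the previous state on return.
        oldState, err := term.SetRawTerminal(fd)
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            return
        }
        defer term.RestoreTerminal(fd, oldState)

        // Read one raw keystroke; with OPOST disabled, emit explicit \r\n.
        buf := make([]byte, 1)
        if _, err := os.Stdin.Read(buf); err == nil {
            fmt.Printf("\r\ngot byte %#x\r\n", buf[0])
        }
    }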
diff --git a/vendor/github.com/docker/docker/pkg/term/term_windows.go b/vendor/github.com/docker/docker/pkg/term/term_windows.go
new file mode 100644
index 00000000..a3c3db13
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/term_windows.go
@@ -0,0 +1,221 @@
+package term // import "github.com/docker/docker/pkg/term"
+
+import (
+	"io"
+	"os"
+	"os/signal"
+	"syscall" // used for STD_INPUT_HANDLE, STD_OUTPUT_HANDLE and STD_ERROR_HANDLE
+
+	"github.com/Azure/go-ansiterm/winterm"
+	"github.com/docker/docker/pkg/term/windows"
+)
+
+// State holds the console mode for the terminal.
+type State struct {
+	mode uint32
+}
+
+// Winsize is used for window size.
+type Winsize struct {
+	Height uint16
+	Width  uint16
+}
+
+// vtInputSupported is true if winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported by the console
+var vtInputSupported bool
+
+// StdStreams returns the standard streams (stdin, stdout, stderr).
+func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
+	// Turn on VT handling on all std handles, if possible. This might
+	// fail, in which case we will fall back to terminal emulation.
+	var emulateStdin, emulateStdout, emulateStderr bool
+	fd := os.Stdin.Fd()
+	if mode, err := winterm.GetConsoleMode(fd); err == nil {
+		// Validate that winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported, but do not set it.
+		if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_INPUT); err != nil {
+			emulateStdin = true
+		} else {
+			vtInputSupported = true
+		}
+		// Unconditionally set the console mode back even on failure because SetConsoleMode
+		// remembers invalid bits on input handles.
+		winterm.SetConsoleMode(fd, mode)
+	}
+
+	fd = os.Stdout.Fd()
+	if mode, err := winterm.GetConsoleMode(fd); err == nil {
+		// Validate winterm.DISABLE_NEWLINE_AUTO_RETURN is supported, but do not set it.
+		if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING|winterm.DISABLE_NEWLINE_AUTO_RETURN); err != nil {
+			emulateStdout = true
+		} else {
+			winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING)
+		}
+	}
+
+	fd = os.Stderr.Fd()
+	if mode, err := winterm.GetConsoleMode(fd); err == nil {
+		// Validate winterm.DISABLE_NEWLINE_AUTO_RETURN is supported, but do not set it.
+		if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING|winterm.DISABLE_NEWLINE_AUTO_RETURN); err != nil {
+			emulateStderr = true
+		} else {
+			winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING)
+		}
+	}
+
+	// Temporarily use STD_INPUT_HANDLE, STD_OUTPUT_HANDLE and
+	// STD_ERROR_HANDLE from syscall rather than x/sys/windows as long as
+	// go-ansiterm hasn't switched to x/sys/windows.
+	// TODO: switch back to x/sys/windows once go-ansiterm has switched
+	if emulateStdin {
+		stdIn = windowsconsole.NewAnsiReader(syscall.STD_INPUT_HANDLE)
+	} else {
+		stdIn = os.Stdin
+	}
+
+	if emulateStdout {
+		stdOut = windowsconsole.NewAnsiWriter(syscall.STD_OUTPUT_HANDLE)
+	} else {
+		stdOut = os.Stdout
+	}
+
+	if emulateStderr {
+		stdErr = windowsconsole.NewAnsiWriter(syscall.STD_ERROR_HANDLE)
+	} else {
+		stdErr = os.Stderr
+	}
+
+	return
+}
+
+// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal.
+func GetFdInfo(in interface{}) (uintptr, bool) {
+	return windowsconsole.GetHandleInfo(in)
+}
+
+// GetWinsize returns the window size based on the specified file descriptor.
+func GetWinsize(fd uintptr) (*Winsize, error) {
+	info, err := winterm.GetConsoleScreenBufferInfo(fd)
+	if err != nil {
+		return nil, err
+	}
+
+	winsize := &Winsize{
+		Width:  uint16(info.Window.Right - info.Window.Left + 1),
+		Height: uint16(info.Window.Bottom - info.Window.Top + 1),
+	}
+
+	return winsize, nil
+}
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+func IsTerminal(fd uintptr) bool {
+	return windowsconsole.IsConsole(fd)
+}
+
+// RestoreTerminal restores the terminal connected to the given file descriptor
+// to a previous state.
+func RestoreTerminal(fd uintptr, state *State) error {
+	return winterm.SetConsoleMode(fd, state.mode)
+}
+
+// SaveState saves the state of the terminal connected to the given file descriptor.
+func SaveState(fd uintptr) (*State, error) {
+	mode, e := winterm.GetConsoleMode(fd)
+	if e != nil {
+		return nil, e
+	}
+
+	return &State{mode: mode}, nil
+}
+
+// DisableEcho disables echo for the terminal connected to the given file descriptor.
+// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx
+func DisableEcho(fd uintptr, state *State) error {
+	mode := state.mode
+	mode &^= winterm.ENABLE_ECHO_INPUT
+	mode |= winterm.ENABLE_PROCESSED_INPUT | winterm.ENABLE_LINE_INPUT
+	err := winterm.SetConsoleMode(fd, mode)
+	if err != nil {
+		return err
+	}
+
+	// Register an interrupt handler to catch and restore prior state
+	restoreAtInterrupt(fd, state)
+	return nil
+}
+
+// SetRawTerminal puts the terminal connected to the given file descriptor into
+// raw mode and returns the previous state. On UNIX, this puts both the input
+// and output into raw mode. On Windows, it only puts the input into raw mode.
+func SetRawTerminal(fd uintptr) (*State, error) {
+	state, err := MakeRaw(fd)
+	if err != nil {
+		return nil, err
+	}
+
+	// Register an interrupt handler to catch and restore prior state
+	restoreAtInterrupt(fd, state)
+	return state, err
+}
+
+// SetRawTerminalOutput puts the output of terminal connected to the given file
+// descriptor into raw mode. On UNIX, this does nothing and returns nil for the
+// state. On Windows, it disables LF -> CRLF translation.
+func SetRawTerminalOutput(fd uintptr) (*State, error) {
+	state, err := SaveState(fd)
+	if err != nil {
+		return nil, err
+	}
+
+	// Ignore failures, since winterm.DISABLE_NEWLINE_AUTO_RETURN might not be supported on this
+	// version of Windows.
+	winterm.SetConsoleMode(fd, state.mode|winterm.DISABLE_NEWLINE_AUTO_RETURN)
+	return state, err
+}
+
+// MakeRaw puts the terminal (Windows Console) connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be restored.
+func MakeRaw(fd uintptr) (*State, error) {
+	state, err := SaveState(fd)
+	if err != nil {
+		return nil, err
+	}
+
+	mode := state.mode
+
+	// See
+	// -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
+	// -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx
+
+	// Disable these modes
+	mode &^= winterm.ENABLE_ECHO_INPUT
+	mode &^= winterm.ENABLE_LINE_INPUT
+	mode &^= winterm.ENABLE_MOUSE_INPUT
+	mode &^= winterm.ENABLE_WINDOW_INPUT
+	mode &^= winterm.ENABLE_PROCESSED_INPUT
+
+	// Enable these modes
+	mode |= winterm.ENABLE_EXTENDED_FLAGS
+	mode |= winterm.ENABLE_INSERT_MODE
+	mode |= winterm.ENABLE_QUICK_EDIT_MODE
+	if vtInputSupported {
+		mode |= winterm.ENABLE_VIRTUAL_TERMINAL_INPUT
+	}
+
+	err = winterm.SetConsoleMode(fd, mode)
+	if err != nil {
+		return nil, err
+	}
+	return state, nil
+}
+
+func restoreAtInterrupt(fd uintptr, state *State) {
+	sigchan := make(chan os.Signal, 1)
+	signal.Notify(sigchan, os.Interrupt)
+
+	go func() {
+		_ = <-sigchan
+		RestoreTerminal(fd, state)
+		os.Exit(0)
+	}()
+}
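
The point of the Windows implementation is that callers keep using the same cross-platform entry points; a brief sketch (illustrative only):

    package main

    import (
        "fmt"

        "github.com/docker/docker/pkg/term"
    )

    func main() {
        // On Windows these may be ANSI-emulating wrappers when the console
        // does not support virtual terminal sequences; on Unix they are the
        // plain os.Stdin/os.Stdout/os.Stderr.
        stdin, stdout, stderr := term.StdStreams()
        _ = stdin
        _ = stderr
        fmt.Fprintln(stdout, "hello from an ANSI-capable stream")
    }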
diff --git a/vendor/github.com/docker/docker/pkg/term/termios_bsd.go b/vendor/github.com/docker/docker/pkg/term/termios_bsd.go
new file mode 100644
index 00000000..48b16f52
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/termios_bsd.go
@@ -0,0 +1,42 @@
+// +build darwin freebsd openbsd netbsd
+
+package term // import "github.com/docker/docker/pkg/term"
+
+import (
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+)
+
+const (
+	getTermios = unix.TIOCGETA
+	setTermios = unix.TIOCSETA
+)
+
+// Termios is the Unix API for terminal I/O.
+type Termios unix.Termios
+
+// MakeRaw puts the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+func MakeRaw(fd uintptr) (*State, error) {
+	var oldState State
+	if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 {
+		return nil, err
+	}
+
+	newState := oldState.termios
+	newState.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON)
+	newState.Oflag &^= unix.OPOST
+	newState.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN)
+	newState.Cflag &^= (unix.CSIZE | unix.PARENB)
+	newState.Cflag |= unix.CS8
+	newState.Cc[unix.VMIN] = 1
+	newState.Cc[unix.VTIME] = 0
+
+	if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 {
+		return nil, err
+	}
+
+	return &oldState, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/termios_linux.go b/vendor/github.com/docker/docker/pkg/term/termios_linux.go
new file mode 100644
index 00000000..6d4c63fd
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/termios_linux.go
@@ -0,0 +1,39 @@
+package term // import "github.com/docker/docker/pkg/term"
+
+import (
+	"golang.org/x/sys/unix"
+)
+
+const (
+	getTermios = unix.TCGETS
+	setTermios = unix.TCSETS
+)
+
+// Termios is the Unix API for terminal I/O.
+type Termios unix.Termios
+
+// MakeRaw puts the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+func MakeRaw(fd uintptr) (*State, error) {
+	termios, err := unix.IoctlGetTermios(int(fd), getTermios)
+	if err != nil {
+		return nil, err
+	}
+
+	var oldState State
+	oldState.termios = Termios(*termios)
+
+	termios.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON)
+	termios.Oflag &^= unix.OPOST
+	termios.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN)
+	termios.Cflag &^= (unix.CSIZE | unix.PARENB)
+	termios.Cflag |= unix.CS8
+	termios.Cc[unix.VMIN] = 1
+	termios.Cc[unix.VTIME] = 0
+
+	if err := unix.IoctlSetTermios(int(fd), setTermios, termios); err != nil {
+		return nil, err
+	}
+	return &oldState, nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go b/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go
new file mode 100644
index 00000000..1d7c452c
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go
@@ -0,0 +1,263 @@
+// +build windows
+
+package windowsconsole // import "github.com/docker/docker/pkg/term/windows"
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"strings"
+	"unsafe"
+
+	ansiterm "github.com/Azure/go-ansiterm"
+	"github.com/Azure/go-ansiterm/winterm"
+)
+
+const (
+	escapeSequence = ansiterm.KEY_ESC_CSI
+)
+
+// ansiReader wraps a standard input file (e.g., os.Stdin) providing ANSI sequence translation.
+type ansiReader struct {
+	file     *os.File
+	fd       uintptr
+	buffer   []byte
+	cbBuffer int
+	command  []byte
+}
+
+// NewAnsiReader returns an io.ReadCloser that provides VT100 terminal emulation on top of a
+// Windows console input handle.
+func NewAnsiReader(nFile int) io.ReadCloser {
+	initLogger()
+	file, fd := winterm.GetStdFile(nFile)
+	return &ansiReader{
+		file:    file,
+		fd:      fd,
+		command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH),
+		buffer:  make([]byte, 0),
+	}
+}
+
+// Close closes the wrapped file.
+func (ar *ansiReader) Close() (err error) {
+	return ar.file.Close()
+}
+
+// Fd returns the file descriptor of the wrapped file.
+func (ar *ansiReader) Fd() uintptr {
+	return ar.fd
+}
+
+// Read reads up to len(p) bytes of translated input events into p.
+func (ar *ansiReader) Read(p []byte) (int, error) {
+	if len(p) == 0 {
+		return 0, nil
+	}
+
+	// Previously read bytes exist, read as much as we can and return
+	if len(ar.buffer) > 0 {
+		logger.Debugf("Reading previously cached bytes")
+
+		originalLength := len(ar.buffer)
+		copiedLength := copy(p, ar.buffer)
+
+		if copiedLength == originalLength {
+			ar.buffer = make([]byte, 0, len(p))
+		} else {
+			ar.buffer = ar.buffer[copiedLength:]
+		}
+
+		logger.Debugf("Read from cache p[%d]: % x", copiedLength, p)
+		return copiedLength, nil
+	}
+
+	// Read and translate key events
+	events, err := readInputEvents(ar.fd, len(p))
+	if err != nil {
+		return 0, err
+	} else if len(events) == 0 {
+		logger.Debug("No input events detected")
+		return 0, nil
+	}
+
+	keyBytes := translateKeyEvents(events, []byte(escapeSequence))
+
+	// Save excess bytes and right-size keyBytes
+	if len(keyBytes) > len(p) {
+		logger.Debugf("Received %d keyBytes, only room for %d bytes", len(keyBytes), len(p))
+		ar.buffer = keyBytes[len(p):]
+		keyBytes = keyBytes[:len(p)]
+	} else if len(keyBytes) == 0 {
+		logger.Debug("No key bytes returned from the translator")
+		return 0, nil
+	}
+
+	copiedLength := copy(p, keyBytes)
+	if copiedLength != len(keyBytes) {
+		return 0, errors.New("unexpected copy length encountered")
+	}
+
+	logger.Debugf("Read        p[%d]: % x", copiedLength, p)
+	logger.Debugf("Read keyBytes[%d]: % x", copiedLength, keyBytes)
+	return copiedLength, nil
+}
+
+// readInputEvents polls until at least one event is available.
+func readInputEvents(fd uintptr, maxBytes int) ([]winterm.INPUT_RECORD, error) {
+	// Determine the maximum number of records to retrieve
+	// -- Cast around the type system to obtain the size of a single INPUT_RECORD.
+	//    unsafe.Sizeof requires an expression vs. a type-reference; the casting
+	//    tricks the type system into believing it has such an expression.
+	recordSize := int(unsafe.Sizeof(*((*winterm.INPUT_RECORD)(unsafe.Pointer(&maxBytes)))))
+	countRecords := maxBytes / recordSize
+	if countRecords > ansiterm.MAX_INPUT_EVENTS {
+		countRecords = ansiterm.MAX_INPUT_EVENTS
+	} else if countRecords == 0 {
+		countRecords = 1
+	}
+	logger.Debugf("[windows] readInputEvents: Reading %v records (buffer size %v, record size %v)", countRecords, maxBytes, recordSize)
+
+	// Wait for and read input events
+	events := make([]winterm.INPUT_RECORD, countRecords)
+	nEvents := uint32(0)
+	eventsExist, err := winterm.WaitForSingleObject(fd, winterm.WAIT_INFINITE)
+	if err != nil {
+		return nil, err
+	}
+
+	if eventsExist {
+		err = winterm.ReadConsoleInput(fd, events, &nEvents)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// Return a slice restricted to the number of returned records
+	logger.Debugf("[windows] readInputEvents: Read %v events", nEvents)
+	return events[:nEvents], nil
+}
+
+// KeyEvent Translation Helpers
+
+var arrowKeyMapPrefix = map[uint16]string{
+	winterm.VK_UP:    "%s%sA",
+	winterm.VK_DOWN:  "%s%sB",
+	winterm.VK_RIGHT: "%s%sC",
+	winterm.VK_LEFT:  "%s%sD",
+}
+
+var keyMapPrefix = map[uint16]string{
+	winterm.VK_UP:     "\x1B[%sA",
+	winterm.VK_DOWN:   "\x1B[%sB",
+	winterm.VK_RIGHT:  "\x1B[%sC",
+	winterm.VK_LEFT:   "\x1B[%sD",
+	winterm.VK_HOME:   "\x1B[1%s~", // showkey shows ^[[1
+	winterm.VK_END:    "\x1B[4%s~", // showkey shows ^[[4
+	winterm.VK_INSERT: "\x1B[2%s~",
+	winterm.VK_DELETE: "\x1B[3%s~",
+	winterm.VK_PRIOR:  "\x1B[5%s~",
+	winterm.VK_NEXT:   "\x1B[6%s~",
+	winterm.VK_F1:     "",
+	winterm.VK_F2:     "",
+	winterm.VK_F3:     "\x1B[13%s~",
+	winterm.VK_F4:     "\x1B[14%s~",
+	winterm.VK_F5:     "\x1B[15%s~",
+	winterm.VK_F6:     "\x1B[17%s~",
+	winterm.VK_F7:     "\x1B[18%s~",
+	winterm.VK_F8:     "\x1B[19%s~",
+	winterm.VK_F9:     "\x1B[20%s~",
+	winterm.VK_F10:    "\x1B[21%s~",
+	winterm.VK_F11:    "\x1B[23%s~",
+	winterm.VK_F12:    "\x1B[24%s~",
+}
+
+// translateKeyEvents converts the input events into the appropriate ANSI string.
+func translateKeyEvents(events []winterm.INPUT_RECORD, escapeSequence []byte) []byte {
+	var buffer bytes.Buffer
+	for _, event := range events {
+		if event.EventType == winterm.KEY_EVENT && event.KeyEvent.KeyDown != 0 {
+			buffer.WriteString(keyToString(&event.KeyEvent, escapeSequence))
+		}
+	}
+
+	return buffer.Bytes()
+}
+
+// keyToString maps the given input event record to the corresponding string.
+func keyToString(keyEvent *winterm.KEY_EVENT_RECORD, escapeSequence []byte) string {
+	if keyEvent.UnicodeChar == 0 {
+		return formatVirtualKey(keyEvent.VirtualKeyCode, keyEvent.ControlKeyState, escapeSequence)
+	}
+
+	_, alt, control := getControlKeys(keyEvent.ControlKeyState)
+	if control {
+		// TODO(azlinux): Implement following control sequences
+		// <Ctrl>-D  Signals the end of input from the keyboard; also exits current shell.
+		// <Ctrl>-H  Deletes the first character to the left of the cursor. Also called the ERASE key.
+		// <Ctrl>-Q  Restarts printing after it has been stopped with <Ctrl>-s.
+		// <Ctrl>-S  Suspends printing on the screen (does not stop the program).
+		// <Ctrl>-U  Deletes all characters on the current line. Also called the KILL key.
+		// <Ctrl>-E  Quits current command and creates a core
+
+	}
+
+	// <Alt>+Key generates ESC N Key
+	if !control && alt {
+		return ansiterm.KEY_ESC_N + strings.ToLower(string(keyEvent.UnicodeChar))
+	}
+
+	return string(keyEvent.UnicodeChar)
+}
+
+// formatVirtualKey converts a virtual key (e.g., up arrow) into the appropriate ANSI string.
+func formatVirtualKey(key uint16, controlState uint32, escapeSequence []byte) string {
+	shift, alt, control := getControlKeys(controlState)
+	modifier := getControlKeysModifier(shift, alt, control)
+
+	if format, ok := arrowKeyMapPrefix[key]; ok {
+		return fmt.Sprintf(format, escapeSequence, modifier)
+	}
+
+	if format, ok := keyMapPrefix[key]; ok {
+		return fmt.Sprintf(format, modifier)
+	}
+
+	return ""
+}
+
+// getControlKeys extracts the shift, alt, and ctrl key states.
+func getControlKeys(controlState uint32) (shift, alt, control bool) {
+	shift = 0 != (controlState & winterm.SHIFT_PRESSED)
+	alt = 0 != (controlState & (winterm.LEFT_ALT_PRESSED | winterm.RIGHT_ALT_PRESSED))
+	control = 0 != (controlState & (winterm.LEFT_CTRL_PRESSED | winterm.RIGHT_CTRL_PRESSED))
+	return shift, alt, control
+}
+
+// getControlKeysModifier returns the ANSI modifier for the given combination of control keys.
+func getControlKeysModifier(shift, alt, control bool) string {
+	if shift && alt && control {
+		return ansiterm.KEY_CONTROL_PARAM_8
+	}
+	if alt && control {
+		return ansiterm.KEY_CONTROL_PARAM_7
+	}
+	if shift && control {
+		return ansiterm.KEY_CONTROL_PARAM_6
+	}
+	if control {
+		return ansiterm.KEY_CONTROL_PARAM_5
+	}
+	if shift && alt {
+		return ansiterm.KEY_CONTROL_PARAM_4
+	}
+	if alt {
+		return ansiterm.KEY_CONTROL_PARAM_3
+	}
+	if shift {
+		return ansiterm.KEY_CONTROL_PARAM_2
+	}
+	return ""
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go b/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go
new file mode 100644
index 00000000..7799a03f
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go
@@ -0,0 +1,64 @@
+// +build windows
+
+package windowsconsole // import "github.com/docker/docker/pkg/term/windows"
+
+import (
+	"io"
+	"os"
+
+	ansiterm "github.com/Azure/go-ansiterm"
+	"github.com/Azure/go-ansiterm/winterm"
+)
+
+// ansiWriter wraps a standard output file (e.g., os.Stdout) providing ANSI sequence translation.
+type ansiWriter struct {
+	file           *os.File
+	fd             uintptr
+	infoReset      *winterm.CONSOLE_SCREEN_BUFFER_INFO
+	command        []byte
+	escapeSequence []byte
+	inAnsiSequence bool
+	parser         *ansiterm.AnsiParser
+}
+
+// NewAnsiWriter returns an io.Writer that provides VT100 terminal emulation on top of a
+// Windows console output handle.
+func NewAnsiWriter(nFile int) io.Writer {
+	initLogger()
+	file, fd := winterm.GetStdFile(nFile)
+	info, err := winterm.GetConsoleScreenBufferInfo(fd)
+	if err != nil {
+		return nil
+	}
+
+	parser := ansiterm.CreateParser("Ground", winterm.CreateWinEventHandler(fd, file))
+	logger.Infof("newAnsiWriter: parser %p", parser)
+
+	aw := &ansiWriter{
+		file:           file,
+		fd:             fd,
+		infoReset:      info,
+		command:        make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH),
+		escapeSequence: []byte(ansiterm.KEY_ESC_CSI),
+		parser:         parser,
+	}
+
+	logger.Infof("newAnsiWriter: aw.parser %p", aw.parser)
+	logger.Infof("newAnsiWriter: %v", aw)
+	return aw
+}
+
+func (aw *ansiWriter) Fd() uintptr {
+	return aw.fd
+}
+
+// Write writes len(p) bytes from p to the underlying data stream.
+func (aw *ansiWriter) Write(p []byte) (total int, err error) {
+	if len(p) == 0 {
+		return 0, nil
+	}
+
+	logger.Infof("Write: % x", p)
+	logger.Infof("Write: %s", string(p))
+	return aw.parser.Parse(p)
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/windows/console.go b/vendor/github.com/docker/docker/pkg/term/windows/console.go
new file mode 100644
index 00000000..52740197
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/windows/console.go
@@ -0,0 +1,35 @@
+// +build windows
+
+package windowsconsole // import "github.com/docker/docker/pkg/term/windows"
+
+import (
+	"os"
+
+	"github.com/Azure/go-ansiterm/winterm"
+)
+
+// GetHandleInfo returns the file descriptor and a bool indicating whether the file is a console.
+func GetHandleInfo(in interface{}) (uintptr, bool) {
+	switch t := in.(type) {
+	case *ansiReader:
+		return t.Fd(), true
+	case *ansiWriter:
+		return t.Fd(), true
+	}
+
+	var inFd uintptr
+	var isTerminal bool
+
+	if file, ok := in.(*os.File); ok {
+		inFd = file.Fd()
+		isTerminal = IsConsole(inFd)
+	}
+	return inFd, isTerminal
+}
+
+// IsConsole returns true if the given file descriptor is a Windows Console.
+// The code assumes that GetConsoleMode will return an error for file descriptors that are not a console.
+func IsConsole(fd uintptr) bool {
+	_, e := winterm.GetConsoleMode(fd)
+	return e == nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/windows/windows.go b/vendor/github.com/docker/docker/pkg/term/windows/windows.go
new file mode 100644
index 00000000..3e5593ca
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/windows/windows.go
@@ -0,0 +1,33 @@
+// These files implement ANSI-aware input and output streams for use by the Docker Windows client.
+// When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create
+// and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls.
+
+package windowsconsole // import "github.com/docker/docker/pkg/term/windows"
+
+import (
+	"io/ioutil"
+	"os"
+	"sync"
+
+	"github.com/Azure/go-ansiterm"
+	"github.com/sirupsen/logrus"
+)
+
+var logger *logrus.Logger
+var initOnce sync.Once
+
+func initLogger() {
+	initOnce.Do(func() {
+		logFile := ioutil.Discard
+
+		if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" {
+			logFile, _ = os.Create("ansiReaderWriter.log")
+		}
+
+		logger = &logrus.Logger{
+			Out:       logFile,
+			Formatter: new(logrus.TextFormatter),
+			Level:     logrus.DebugLevel,
+		}
+	})
+}
diff --git a/vendor/github.com/docker/docker/pkg/term/winsize.go b/vendor/github.com/docker/docker/pkg/term/winsize.go
new file mode 100644
index 00000000..a19663ad
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/term/winsize.go
@@ -0,0 +1,20 @@
+// +build !windows
+
+package term // import "github.com/docker/docker/pkg/term"
+
+import (
+	"golang.org/x/sys/unix"
+)
+
+// GetWinsize returns the window size based on the specified file descriptor.
+func GetWinsize(fd uintptr) (*Winsize, error) {
+	uws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ)
+	ws := &Winsize{Height: uws.Row, Width: uws.Col, x: uws.Xpixel, y: uws.Ypixel}
+	return ws, err
+}
+
+// SetWinsize tries to set the specified window size for the specified file descriptor.
+func SetWinsize(fd uintptr, ws *Winsize) error {
+	uws := &unix.Winsize{Row: ws.Height, Col: ws.Width, Xpixel: ws.x, Ypixel: ws.y}
+	return unix.IoctlSetWinsize(int(fd), unix.TIOCSWINSZ, uws)
+}
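
A sketch of querying the window size (illustrative only):

    package main

    import (
        "fmt"
        "os"

        "github.com/docker/docker/pkg/term"
    )

    func main() {
        fd, isTerm := term.GetFdInfo(os.Stdout)
        if !isTerm {
            fmt.Fprintln(os.Stderr, "stdout is not a terminal")
            return
        }
        ws, err := term.GetWinsize(fd)
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            return
        }
        fmt.Printf("%d columns x %d rows\n", ws.Width, ws.Height)
    }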
diff --git a/vendor/github.com/docker/spdystream/CONTRIBUTING.md b/vendor/github.com/docker/spdystream/CONTRIBUTING.md
new file mode 100644
index 00000000..d4eddcc5
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/CONTRIBUTING.md
@@ -0,0 +1,13 @@
+# Contributing to SpdyStream
+
+Want to hack on spdystream? Awesome! Here are instructions to get you
+started.
+
+SpdyStream is a part of the [Docker](https://docker.io) project, and follows
+the same rules and principles. If you're already familiar with the way
+Docker does things, you'll feel right at home.
+
+Otherwise, go read
+[Docker's contributions guidelines](https://github.com/dotcloud/docker/blob/master/CONTRIBUTING.md).
+
+Happy hacking!
diff --git a/vendor/github.com/docker/spdystream/LICENSE b/vendor/github.com/docker/spdystream/LICENSE
new file mode 100644
index 00000000..9e4bd4db
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/LICENSE
@@ -0,0 +1,191 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   Copyright 2014-2015 Docker, Inc.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/docker/spdystream/LICENSE.docs b/vendor/github.com/docker/spdystream/LICENSE.docs
new file mode 100644
index 00000000..e26cd4fc
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/LICENSE.docs
@@ -0,0 +1,425 @@
+Attribution-ShareAlike 4.0 International
+
+=======================================================================
+
+Creative Commons Corporation ("Creative Commons") is not a law firm and
+does not provide legal services or legal advice. Distribution of
+Creative Commons public licenses does not create a lawyer-client or
+other relationship. Creative Commons makes its licenses and related
+information available on an "as-is" basis. Creative Commons gives no
+warranties regarding its licenses, any material licensed under their
+terms and conditions, or any related information. Creative Commons
+disclaims all liability for damages resulting from their use to the
+fullest extent possible.
+
+Using Creative Commons Public Licenses
+
+Creative Commons public licenses provide a standard set of terms and
+conditions that creators and other rights holders may use to share
+original works of authorship and other material subject to copyright
+and certain other rights specified in the public license below. The
+following considerations are for informational purposes only, are not
+exhaustive, and do not form part of our licenses.
+
+     Considerations for licensors: Our public licenses are
+     intended for use by those authorized to give the public
+     permission to use material in ways otherwise restricted by
+     copyright and certain other rights. Our licenses are
+     irrevocable. Licensors should read and understand the terms
+     and conditions of the license they choose before applying it.
+     Licensors should also secure all rights necessary before
+     applying our licenses so that the public can reuse the
+     material as expected. Licensors should clearly mark any
+     material not subject to the license. This includes other CC-
+     licensed material, or material used under an exception or
+     limitation to copyright. More considerations for licensors:
+	wiki.creativecommons.org/Considerations_for_licensors
+
+     Considerations for the public: By using one of our public
+     licenses, a licensor grants the public permission to use the
+     licensed material under specified terms and conditions. If
+     the licensor's permission is not necessary for any reason--for
+     example, because of any applicable exception or limitation to
+     copyright--then that use is not regulated by the license. Our
+     licenses grant only permissions under copyright and certain
+     other rights that a licensor has authority to grant. Use of
+     the licensed material may still be restricted for other
+     reasons, including because others have copyright or other
+     rights in the material. A licensor may make special requests,
+     such as asking that all changes be marked or described.
+     Although not required by our licenses, you are encouraged to
+     respect those requests where reasonable. More_considerations
+     for the public:
+	wiki.creativecommons.org/Considerations_for_licensees
+
+=======================================================================
+
+Creative Commons Attribution-ShareAlike 4.0 International Public
+License
+
+By exercising the Licensed Rights (defined below), You accept and agree
+to be bound by the terms and conditions of this Creative Commons
+Attribution-ShareAlike 4.0 International Public License ("Public
+License"). To the extent this Public License may be interpreted as a
+contract, You are granted the Licensed Rights in consideration of Your
+acceptance of these terms and conditions, and the Licensor grants You
+such rights in consideration of benefits the Licensor receives from
+making the Licensed Material available under these terms and
+conditions.
+
+
+Section 1 -- Definitions.
+
+  a. Adapted Material means material subject to Copyright and Similar
+     Rights that is derived from or based upon the Licensed Material
+     and in which the Licensed Material is translated, altered,
+     arranged, transformed, or otherwise modified in a manner requiring
+     permission under the Copyright and Similar Rights held by the
+     Licensor. For purposes of this Public License, where the Licensed
+     Material is a musical work, performance, or sound recording,
+     Adapted Material is always produced where the Licensed Material is
+     synched in timed relation with a moving image.
+
+  b. Adapter's License means the license You apply to Your Copyright
+     and Similar Rights in Your contributions to Adapted Material in
+     accordance with the terms and conditions of this Public License.
+
+  c. BY-SA Compatible License means a license listed at
+     creativecommons.org/compatiblelicenses, approved by Creative
+     Commons as essentially the equivalent of this Public License.
+
+  d. Copyright and Similar Rights means copyright and/or similar rights
+     closely related to copyright including, without limitation,
+     performance, broadcast, sound recording, and Sui Generis Database
+     Rights, without regard to how the rights are labeled or
+     categorized. For purposes of this Public License, the rights
+     specified in Section 2(b)(1)-(2) are not Copyright and Similar
+     Rights.
+
+  e. Effective Technological Measures means those measures that, in the
+     absence of proper authority, may not be circumvented under laws
+     fulfilling obligations under Article 11 of the WIPO Copyright
+     Treaty adopted on December 20, 1996, and/or similar international
+     agreements.
+
+  f. Exceptions and Limitations means fair use, fair dealing, and/or
+     any other exception or limitation to Copyright and Similar Rights
+     that applies to Your use of the Licensed Material.
+
+  g. License Elements means the license attributes listed in the name
+     of a Creative Commons Public License. The License Elements of this
+     Public License are Attribution and ShareAlike.
+
+  h. Licensed Material means the artistic or literary work, database,
+     or other material to which the Licensor applied this Public
+     License.
+
+  i. Licensed Rights means the rights granted to You subject to the
+     terms and conditions of this Public License, which are limited to
+     all Copyright and Similar Rights that apply to Your use of the
+     Licensed Material and that the Licensor has authority to license.
+
+  j. Licensor means the individual(s) or entity(ies) granting rights
+     under this Public License.
+
+  k. Share means to provide material to the public by any means or
+     process that requires permission under the Licensed Rights, such
+     as reproduction, public display, public performance, distribution,
+     dissemination, communication, or importation, and to make material
+     available to the public including in ways that members of the
+     public may access the material from a place and at a time
+     individually chosen by them.
+
+  l. Sui Generis Database Rights means rights other than copyright
+     resulting from Directive 96/9/EC of the European Parliament and of
+     the Council of 11 March 1996 on the legal protection of databases,
+     as amended and/or succeeded, as well as other essentially
+     equivalent rights anywhere in the world.
+
+  m. You means the individual or entity exercising the Licensed Rights
+     under this Public License. Your has a corresponding meaning.
+
+
+Section 2 -- Scope.
+
+  a. License grant.
+
+       1. Subject to the terms and conditions of this Public License,
+          the Licensor hereby grants You a worldwide, royalty-free,
+          non-sublicensable, non-exclusive, irrevocable license to
+          exercise the Licensed Rights in the Licensed Material to:
+
+            a. reproduce and Share the Licensed Material, in whole or
+               in part; and
+
+            b. produce, reproduce, and Share Adapted Material.
+
+       2. Exceptions and Limitations. For the avoidance of doubt, where
+          Exceptions and Limitations apply to Your use, this Public
+          License does not apply, and You do not need to comply with
+          its terms and conditions.
+
+       3. Term. The term of this Public License is specified in Section
+          6(a).
+
+       4. Media and formats; technical modifications allowed. The
+          Licensor authorizes You to exercise the Licensed Rights in
+          all media and formats whether now known or hereafter created,
+          and to make technical modifications necessary to do so. The
+          Licensor waives and/or agrees not to assert any right or
+          authority to forbid You from making technical modifications
+          necessary to exercise the Licensed Rights, including
+          technical modifications necessary to circumvent Effective
+          Technological Measures. For purposes of this Public License,
+          simply making modifications authorized by this Section 2(a)
+          (4) never produces Adapted Material.
+
+       5. Downstream recipients.
+
+            a. Offer from the Licensor -- Licensed Material. Every
+               recipient of the Licensed Material automatically
+               receives an offer from the Licensor to exercise the
+               Licensed Rights under the terms and conditions of this
+               Public License.
+
+            b. Additional offer from the Licensor -- Adapted Material.
+               Every recipient of Adapted Material from You
+               automatically receives an offer from the Licensor to
+               exercise the Licensed Rights in the Adapted Material
+               under the conditions of the Adapter's License You apply.
+
+            c. No downstream restrictions. You may not offer or impose
+               any additional or different terms or conditions on, or
+               apply any Effective Technological Measures to, the
+               Licensed Material if doing so restricts exercise of the
+               Licensed Rights by any recipient of the Licensed
+               Material.
+
+       6. No endorsement. Nothing in this Public License constitutes or
+          may be construed as permission to assert or imply that You
+          are, or that Your use of the Licensed Material is, connected
+          with, or sponsored, endorsed, or granted official status by,
+          the Licensor or others designated to receive attribution as
+          provided in Section 3(a)(1)(A)(i).
+
+  b. Other rights.
+
+       1. Moral rights, such as the right of integrity, are not
+          licensed under this Public License, nor are publicity,
+          privacy, and/or other similar personality rights; however, to
+          the extent possible, the Licensor waives and/or agrees not to
+          assert any such rights held by the Licensor to the limited
+          extent necessary to allow You to exercise the Licensed
+          Rights, but not otherwise.
+
+       2. Patent and trademark rights are not licensed under this
+          Public License.
+
+       3. To the extent possible, the Licensor waives any right to
+          collect royalties from You for the exercise of the Licensed
+          Rights, whether directly or through a collecting society
+          under any voluntary or waivable statutory or compulsory
+          licensing scheme. In all other cases the Licensor expressly
+          reserves any right to collect such royalties.
+
+
+Section 3 -- License Conditions.
+
+Your exercise of the Licensed Rights is expressly made subject to the
+following conditions.
+
+  a. Attribution.
+
+       1. If You Share the Licensed Material (including in modified
+          form), You must:
+
+            a. retain the following if it is supplied by the Licensor
+               with the Licensed Material:
+
+                 i. identification of the creator(s) of the Licensed
+                    Material and any others designated to receive
+                    attribution, in any reasonable manner requested by
+                    the Licensor (including by pseudonym if
+                    designated);
+
+                ii. a copyright notice;
+
+               iii. a notice that refers to this Public License;
+
+                iv. a notice that refers to the disclaimer of
+                    warranties;
+
+                 v. a URI or hyperlink to the Licensed Material to the
+                    extent reasonably practicable;
+
+            b. indicate if You modified the Licensed Material and
+               retain an indication of any previous modifications; and
+
+            c. indicate the Licensed Material is licensed under this
+               Public License, and include the text of, or the URI or
+               hyperlink to, this Public License.
+
+       2. You may satisfy the conditions in Section 3(a)(1) in any
+          reasonable manner based on the medium, means, and context in
+          which You Share the Licensed Material. For example, it may be
+          reasonable to satisfy the conditions by providing a URI or
+          hyperlink to a resource that includes the required
+          information.
+
+       3. If requested by the Licensor, You must remove any of the
+          information required by Section 3(a)(1)(A) to the extent
+          reasonably practicable.
+
+  b. ShareAlike.
+
+     In addition to the conditions in Section 3(a), if You Share
+     Adapted Material You produce, the following conditions also apply.
+
+       1. The Adapter's License You apply must be a Creative Commons
+          license with the same License Elements, this version or
+          later, or a BY-SA Compatible License.
+
+       2. You must include the text of, or the URI or hyperlink to, the
+          Adapter's License You apply. You may satisfy this condition
+          in any reasonable manner based on the medium, means, and
+          context in which You Share Adapted Material.
+
+       3. You may not offer or impose any additional or different terms
+          or conditions on, or apply any Effective Technological
+          Measures to, Adapted Material that restrict exercise of the
+          rights granted under the Adapter's License You apply.
+
+
+Section 4 -- Sui Generis Database Rights.
+
+Where the Licensed Rights include Sui Generis Database Rights that
+apply to Your use of the Licensed Material:
+
+  a. for the avoidance of doubt, Section 2(a)(1) grants You the right
+     to extract, reuse, reproduce, and Share all or a substantial
+     portion of the contents of the database;
+
+  b. if You include all or a substantial portion of the database
+     contents in a database in which You have Sui Generis Database
+     Rights, then the database in which You have Sui Generis Database
+     Rights (but not its individual contents) is Adapted Material,
+
+     including for purposes of Section 3(b); and
+  c. You must comply with the conditions in Section 3(a) if You Share
+     all or a substantial portion of the contents of the database.
+
+For the avoidance of doubt, this Section 4 supplements and does not
+replace Your obligations under this Public License where the Licensed
+Rights include other Copyright and Similar Rights.
+
+
+Section 5 -- Disclaimer of Warranties and Limitation of Liability.
+
+  a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
+     EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
+     AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
+     ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
+     IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
+     WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
+     PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
+     ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
+     KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
+     ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
+
+  b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
+     TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
+     NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
+     INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
+     COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
+     USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
+     ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
+     DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
+     IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
+
+  c. The disclaimer of warranties and limitation of liability provided
+     above shall be interpreted in a manner that, to the extent
+     possible, most closely approximates an absolute disclaimer and
+     waiver of all liability.
+
+
+Section 6 -- Term and Termination.
+
+  a. This Public License applies for the term of the Copyright and
+     Similar Rights licensed here. However, if You fail to comply with
+     this Public License, then Your rights under this Public License
+     terminate automatically.
+
+  b. Where Your right to use the Licensed Material has terminated under
+     Section 6(a), it reinstates:
+
+       1. automatically as of the date the violation is cured, provided
+          it is cured within 30 days of Your discovery of the
+          violation; or
+
+       2. upon express reinstatement by the Licensor.
+
+     For the avoidance of doubt, this Section 6(b) does not affect any
+     right the Licensor may have to seek remedies for Your violations
+     of this Public License.
+
+  c. For the avoidance of doubt, the Licensor may also offer the
+     Licensed Material under separate terms or conditions or stop
+     distributing the Licensed Material at any time; however, doing so
+     will not terminate this Public License.
+
+  d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
+     License.
+
+
+Section 7 -- Other Terms and Conditions.
+
+  a. The Licensor shall not be bound by any additional or different
+     terms or conditions communicated by You unless expressly agreed.
+
+  b. Any arrangements, understandings, or agreements regarding the
+     Licensed Material not stated herein are separate from and
+     independent of the terms and conditions of this Public License.
+
+
+Section 8 -- Interpretation.
+
+  a. For the avoidance of doubt, this Public License does not, and
+     shall not be interpreted to, reduce, limit, restrict, or impose
+     conditions on any use of the Licensed Material that could lawfully
+     be made without permission under this Public License.
+
+  b. To the extent possible, if any provision of this Public License is
+     deemed unenforceable, it shall be automatically reformed to the
+     minimum extent necessary to make it enforceable. If the provision
+     cannot be reformed, it shall be severed from this Public License
+     without affecting the enforceability of the remaining terms and
+     conditions.
+
+  c. No term or condition of this Public License will be waived and no
+     failure to comply consented to unless expressly agreed to by the
+     Licensor.
+
+  d. Nothing in this Public License constitutes or may be interpreted
+     as a limitation upon, or waiver of, any privileges and immunities
+     that apply to the Licensor or You, including from the legal
+     processes of any jurisdiction or authority.
+
+
+=======================================================================
+
+Creative Commons is not a party to its public licenses.
+Notwithstanding, Creative Commons may elect to apply one of its public
+licenses to material it publishes and in those instances will be
+considered the "Licensor." Except for the limited purpose of indicating
+that material is shared under a Creative Commons public license or as
+otherwise permitted by the Creative Commons policies published at
+creativecommons.org/policies, Creative Commons does not authorize the
+use of the trademark "Creative Commons" or any other trademark or logo
+of Creative Commons without its prior written consent including,
+without limitation, in connection with any unauthorized modifications
+to any of its public licenses or any other arrangements,
+understandings, or agreements concerning use of licensed material. For
+the avoidance of doubt, this paragraph does not form part of the public
+licenses.
+
+Creative Commons may be contacted at creativecommons.org.
diff --git a/vendor/github.com/docker/spdystream/MAINTAINERS b/vendor/github.com/docker/spdystream/MAINTAINERS
new file mode 100644
index 00000000..14e26332
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/MAINTAINERS
@@ -0,0 +1,28 @@
+# Spdystream maintainers file
+#
+# This file describes who runs the docker/spdystream project and how.
+# This is a living document - if you see something out of date or missing, speak up!
+#
+# It is structured to be consumable by both humans and programs.
+# To extract its contents programmatically, use any TOML-compliant parser.
+#
+# This file is compiled into the MAINTAINERS file in docker/opensource.
+#
+[Org]
+	[Org."Core maintainers"]
+		people = [
+			"dmcgowan",
+		]
+
+[people]
+
+# A reference list of all people associated with the project.
+# All other sections should refer to people by their canonical key
+# in the people section.
+
+	# ADD YOURSELF HERE IN ALPHABETICAL ORDER
+
+	[people.dmcgowan]
+	Name = "Derek McGowan"
+	Email = "derek@docker.com"
+	GitHub = "dmcgowan"
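+
+# As a sketch, assuming the github.com/BurntSushi/toml Go package, the
+# people section above can be decoded with something like:
+#
+#	var m struct {
+#		People map[string]struct{ Name, Email, GitHub string } `toml:"people"`
+#	}
+#	_, err := toml.DecodeFile("MAINTAINERS", &m)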
diff --git a/vendor/github.com/docker/spdystream/README.md b/vendor/github.com/docker/spdystream/README.md
new file mode 100644
index 00000000..11cccd0a
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/README.md
@@ -0,0 +1,77 @@
+# SpdyStream
+
+A multiplexed stream library using SPDY.
+
+## Usage
+
+Client example (connecting to mirroring server without auth)
+
+```go
+package main
+
+import (
+	"fmt"
+	"github.com/docker/spdystream"
+	"net"
+	"net/http"
+)
+
+func main() {
+	conn, err := net.Dial("tcp", "localhost:8080")
+	if err != nil {
+		panic(err)
+	}
+	spdyConn, err := spdystream.NewConnection(conn, false)
+	if err != nil {
+		panic(err)
+	}
+	go spdyConn.Serve(spdystream.NoOpStreamHandler)
+	stream, err := spdyConn.CreateStream(http.Header{}, nil, false)
+	if err != nil {
+		panic(err)
+	}
+
+	stream.Wait()
+
+	fmt.Fprint(stream, "Writing to stream")
+
+	buf := make([]byte, 25)
+	stream.Read(buf)
+	fmt.Println(string(buf))
+
+	stream.Close()
+}
+```
+
+Server example (mirroring server without auth)
+
+```go
+package main
+
+import (
+	"github.com/docker/spdystream"
+	"net"
+)
+
+func main() {
+	listener, err := net.Listen("tcp", "localhost:8080")
+	if err != nil {
+		panic(err)
+	}
+	for {
+		conn, err := listener.Accept()
+		if err != nil {
+			panic(err)
+		}
+		spdyConn, err := spdystream.NewConnection(conn, true)
+		if err != nil {
+			panic(err)
+		}
+		go spdyConn.Serve(spdystream.MirrorStreamHandler)
+	}
+}
+```
+
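+Connection management example (ping, idle timeout, graceful close); a minimal
+sketch against the same local mirroring server, using the `Ping`,
+`SetIdleTimeout`, and `CloseWait` helpers on the connection:
+
+```go
+package main
+
+import (
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/docker/spdystream"
+)
+
+func main() {
+	conn, err := net.Dial("tcp", "localhost:8080")
+	if err != nil {
+		panic(err)
+	}
+	spdyConn, err := spdystream.NewConnection(conn, false)
+	if err != nil {
+		panic(err)
+	}
+	go spdyConn.Serve(spdystream.NoOpStreamHandler)
+
+	// Terminate the connection if it sits idle for more than a minute.
+	spdyConn.SetIdleTimeout(time.Minute)
+
+	// Ping measures the round-trip time to the remote side.
+	latency, err := spdyConn.Ping()
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println("ping:", latency)
+
+	// Send a GOAWAY frame and wait for shutdown to complete.
+	if err := spdyConn.CloseWait(); err != nil {
+		panic(err)
+	}
+}
+```
+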
+## Copyright and license
+
+Copyright © 2014-2015 Docker, Inc. All rights reserved, except as follows. Code is released under the Apache 2.0 license. The README.md file and files in the "docs" folder are licensed under the Creative Commons Attribution-ShareAlike 4.0 International License under the terms and conditions set forth in the file "LICENSE.docs". You may obtain a duplicate copy of the same license, titled CC-BY-SA-4.0, at http://creativecommons.org/licenses/by-sa/4.0/.
diff --git a/vendor/github.com/docker/spdystream/connection.go b/vendor/github.com/docker/spdystream/connection.go
new file mode 100644
index 00000000..6031a0db
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/connection.go
@@ -0,0 +1,958 @@
+package spdystream
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"sync"
+	"time"
+
+	"github.com/docker/spdystream/spdy"
+)
+
+var (
+	ErrInvalidStreamId   = errors.New("Invalid stream id")
+	ErrTimeout           = errors.New("Timeout occurred")
+	ErrReset             = errors.New("Stream reset")
+	ErrWriteClosedStream = errors.New("Write on closed stream")
+)
+
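+// FRAME_WORKERS is the number of worker goroutines (and frame queues) used
+// by Serve; frames for a given stream always map to the same worker so
+// per-stream ordering is preserved. QUEUE_SIZE bounds each worker's queue.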
+const (
+	FRAME_WORKERS = 5
+	QUEUE_SIZE    = 50
+)
+
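+// StreamHandler is called by Serve for each new stream established by the
+// remote side.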
+type StreamHandler func(stream *Stream)
+
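+// AuthHandler reports whether a new stream with the given headers, priority
+// slot, and parent stream id should be accepted.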
+type AuthHandler func(header http.Header, slot uint8, parent uint32) bool
+
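+// idleAwareFramer wraps a spdy.Framer and resets an idle timer whenever a
+// frame is read or written; if the timer expires, all streams are reset and
+// the connection is closed.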
+type idleAwareFramer struct {
+	f              *spdy.Framer
+	conn           *Connection
+	writeLock      sync.Mutex
+	resetChan      chan struct{}
+	setTimeoutLock sync.Mutex
+	setTimeoutChan chan time.Duration
+	timeout        time.Duration
+}
+
+func newIdleAwareFramer(framer *spdy.Framer) *idleAwareFramer {
+	iaf := &idleAwareFramer{
+		f:         framer,
+		resetChan: make(chan struct{}, 2),
+		// setTimeoutChan needs to be buffered to avoid deadlocks when calling setIdleTimeout at about
+		// the same time the connection is being closed
+		setTimeoutChan: make(chan time.Duration, 1),
+	}
+	return iaf
+}
+
+func (i *idleAwareFramer) monitor() {
+	var (
+		timer          *time.Timer
+		expired        <-chan time.Time
+		resetChan      = i.resetChan
+		setTimeoutChan = i.setTimeoutChan
+	)
+Loop:
+	for {
+		select {
+		case timeout := <-i.setTimeoutChan:
+			i.timeout = timeout
+			if timeout == 0 {
+				if timer != nil {
+					timer.Stop()
+				}
+			} else {
+				if timer == nil {
+					timer = time.NewTimer(timeout)
+					expired = timer.C
+				} else {
+					timer.Reset(timeout)
+				}
+			}
+		case <-resetChan:
+			if timer != nil && i.timeout > 0 {
+				timer.Reset(i.timeout)
+			}
+		case <-expired:
+			i.conn.streamCond.L.Lock()
+			streams := i.conn.streams
+			i.conn.streams = make(map[spdy.StreamId]*Stream)
+			i.conn.streamCond.Broadcast()
+			i.conn.streamCond.L.Unlock()
+			go func() {
+				for _, stream := range streams {
+					stream.resetStream()
+				}
+				i.conn.Close()
+			}()
+		case <-i.conn.closeChan:
+			if timer != nil {
+				timer.Stop()
+			}
+
+			// Start a goroutine to drain resetChan. This is needed because we've seen
+			// some unit tests with large numbers of goroutines get into a situation
+			// where resetChan fills up, at least 1 call to Write() is still trying to
+			// send to resetChan, the connection gets closed, and this case statement
+			// attempts to grab the write lock that Write() already has, causing a
+			// deadlock.
+			//
+			// See https://github.com/docker/spdystream/issues/49 for more details.
+			go func() {
+				for range resetChan {
+				}
+			}()
+
+			go func() {
+				for range setTimeoutChan {
+				}
+			}()
+
+			i.writeLock.Lock()
+			close(resetChan)
+			i.resetChan = nil
+			i.writeLock.Unlock()
+
+			i.setTimeoutLock.Lock()
+			close(i.setTimeoutChan)
+			i.setTimeoutChan = nil
+			i.setTimeoutLock.Unlock()
+
+			break Loop
+		}
+	}
+
+	// Drain resetChan
+	for range resetChan {
+	}
+}
+
+func (i *idleAwareFramer) WriteFrame(frame spdy.Frame) error {
+	i.writeLock.Lock()
+	defer i.writeLock.Unlock()
+	if i.resetChan == nil {
+		return io.EOF
+	}
+	err := i.f.WriteFrame(frame)
+	if err != nil {
+		return err
+	}
+
+	i.resetChan <- struct{}{}
+
+	return nil
+}
+
+func (i *idleAwareFramer) ReadFrame() (spdy.Frame, error) {
+	frame, err := i.f.ReadFrame()
+	if err != nil {
+		return nil, err
+	}
+
+	// resetChan should never be closed since it is only closed
+	// when the connection has closed its closeChan. This closure
+	// only occurs after all Reads have finished
+	// TODO (dmcgowan): refactor relationship into connection
+	i.resetChan <- struct{}{}
+
+	return frame, nil
+}
+
+func (i *idleAwareFramer) setIdleTimeout(timeout time.Duration) {
+	i.setTimeoutLock.Lock()
+	defer i.setTimeoutLock.Unlock()
+
+	if i.setTimeoutChan == nil {
+		return
+	}
+
+	i.setTimeoutChan <- timeout
+}
+
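+// Connection multiplexes spdy streams over a single underlying net.Conn.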
+type Connection struct {
+	conn   net.Conn
+	framer *idleAwareFramer
+
+	closeChan      chan bool
+	goneAway       bool
+	lastStreamChan chan<- *Stream
+	goAwayTimeout  time.Duration
+	closeTimeout   time.Duration
+
+	streamLock *sync.RWMutex
+	streamCond *sync.Cond
+	streams    map[spdy.StreamId]*Stream
+
+	nextIdLock       sync.Mutex
+	receiveIdLock    sync.Mutex
+	nextStreamId     spdy.StreamId
+	receivedStreamId spdy.StreamId
+
+	pingIdLock sync.Mutex
+	pingId     uint32
+	pingChans  map[uint32]chan error
+
+	shutdownLock sync.Mutex
+	shutdownChan chan error
+	hasShutdown  bool
+
+	// for testing https://github.com/docker/spdystream/pull/56
+	dataFrameHandler func(*spdy.DataFrame) error
+}
+
+// NewConnection creates a new spdy connection from an existing
+// network connection.
+func NewConnection(conn net.Conn, server bool) (*Connection, error) {
+	framer, framerErr := spdy.NewFramer(conn, conn)
+	if framerErr != nil {
+		return nil, framerErr
+	}
+	idleAwareFramer := newIdleAwareFramer(framer)
+	var sid spdy.StreamId
+	var rid spdy.StreamId
+	var pid uint32
+	if server {
+		sid = 2
+		rid = 1
+		pid = 2
+	} else {
+		sid = 1
+		rid = 2
+		pid = 1
+	}
+
+	streamLock := new(sync.RWMutex)
+	streamCond := sync.NewCond(streamLock)
+
+	session := &Connection{
+		conn:   conn,
+		framer: idleAwareFramer,
+
+		closeChan:     make(chan bool),
+		goAwayTimeout: time.Duration(0),
+		closeTimeout:  time.Duration(0),
+
+		streamLock:       streamLock,
+		streamCond:       streamCond,
+		streams:          make(map[spdy.StreamId]*Stream),
+		nextStreamId:     sid,
+		receivedStreamId: rid,
+
+		pingId:    pid,
+		pingChans: make(map[uint32]chan error),
+
+		shutdownChan: make(chan error),
+	}
+	session.dataFrameHandler = session.handleDataFrame
+	idleAwareFramer.conn = session
+	go idleAwareFramer.monitor()
+
+	return session, nil
+}
+
+// Ping sends a ping frame across the connection and
+// returns the response time
+func (s *Connection) Ping() (time.Duration, error) {
+	pid := s.pingId
+	s.pingIdLock.Lock()
+	if s.pingId > 0x7ffffffe {
+		s.pingId = s.pingId - 0x7ffffffe
+	} else {
+		s.pingId = s.pingId + 2
+	}
+	s.pingIdLock.Unlock()
+	pingChan := make(chan error)
+	s.pingChans[pid] = pingChan
+	defer delete(s.pingChans, pid)
+
+	frame := &spdy.PingFrame{Id: pid}
+	startTime := time.Now()
+	writeErr := s.framer.WriteFrame(frame)
+	if writeErr != nil {
+		return time.Duration(0), writeErr
+	}
+	select {
+	case <-s.closeChan:
+		return time.Duration(0), errors.New("connection closed")
+	case err, ok := <-pingChan:
+		if ok && err != nil {
+			return time.Duration(0), err
+		}
+		break
+	}
+	return time.Since(startTime), nil
+}
+
+// Serve handles frames sent from the remote side, including reply frames
+// which are needed to fully initiate connections.  Both clients and servers
+// should call Serve in a separate goroutine before creating streams.
+func (s *Connection) Serve(newHandler StreamHandler) {
+	// use a WaitGroup to wait for all frames to be drained after receiving
+	// go-away.
+	var wg sync.WaitGroup
+
+	// Partition queues to ensure stream frames are handled
+	// by the same worker, ensuring order is maintained
+	frameQueues := make([]*PriorityFrameQueue, FRAME_WORKERS)
+	for i := 0; i < FRAME_WORKERS; i++ {
+		frameQueues[i] = NewPriorityFrameQueue(QUEUE_SIZE)
+
+		// Ensure frame queue is drained when connection is closed
+		go func(frameQueue *PriorityFrameQueue) {
+			<-s.closeChan
+			frameQueue.Drain()
+		}(frameQueues[i])
+
+		wg.Add(1)
+		go func(frameQueue *PriorityFrameQueue) {
+			// let the WaitGroup know this worker is done
+			defer wg.Done()
+
+			s.frameHandler(frameQueue, newHandler)
+		}(frameQueues[i])
+	}
+
+	var (
+		partitionRoundRobin int
+		goAwayFrame         *spdy.GoAwayFrame
+	)
+Loop:
+	for {
+		readFrame, err := s.framer.ReadFrame()
+		if err != nil {
+			if err != io.EOF {
+				debugMessage("(%p) Frame read error: %s", s, err)
+			} else {
+				debugMessage("(%p) EOF received", s)
+			}
+			break
+		}
+		var priority uint8
+		var partition int
+		switch frame := readFrame.(type) {
+		case *spdy.SynStreamFrame:
+			if s.checkStreamFrame(frame) {
+				priority = frame.Priority
+				partition = int(frame.StreamId % FRAME_WORKERS)
+				debugMessage("(%p) Add stream frame: %d ", s, frame.StreamId)
+				s.addStreamFrame(frame)
+			} else {
+				debugMessage("(%p) Rejected stream frame: %d ", s, frame.StreamId)
+				continue
+			}
+		case *spdy.SynReplyFrame:
+			priority = s.getStreamPriority(frame.StreamId)
+			partition = int(frame.StreamId % FRAME_WORKERS)
+		case *spdy.DataFrame:
+			priority = s.getStreamPriority(frame.StreamId)
+			partition = int(frame.StreamId % FRAME_WORKERS)
+		case *spdy.RstStreamFrame:
+			priority = s.getStreamPriority(frame.StreamId)
+			partition = int(frame.StreamId % FRAME_WORKERS)
+		case *spdy.HeadersFrame:
+			priority = s.getStreamPriority(frame.StreamId)
+			partition = int(frame.StreamId % FRAME_WORKERS)
+		case *spdy.PingFrame:
+			priority = 0
+			partition = partitionRoundRobin
+			partitionRoundRobin = (partitionRoundRobin + 1) % FRAME_WORKERS
+		case *spdy.GoAwayFrame:
+			// hold on to the go away frame and exit the loop
+			goAwayFrame = frame
+			break Loop
+		default:
+			priority = 7
+			partition = partitionRoundRobin
+			partitionRoundRobin = (partitionRoundRobin + 1) % FRAME_WORKERS
+		}
+		frameQueues[partition].Push(readFrame, priority)
+	}
+	close(s.closeChan)
+
+	// wait for all frame handler workers to indicate they've drained their queues
+	// before handling the go away frame
+	wg.Wait()
+
+	if goAwayFrame != nil {
+		s.handleGoAwayFrame(goAwayFrame)
+	}
+
+	// now it's safe to close remote channels and empty s.streams
+	s.streamCond.L.Lock()
+	// notify streams that they're now closed, which will
+	// unblock any stream Read() calls
+	for _, stream := range s.streams {
+		stream.closeRemoteChannels()
+	}
+	s.streams = make(map[spdy.StreamId]*Stream)
+	s.streamCond.Broadcast()
+	s.streamCond.L.Unlock()
+}
+
+func (s *Connection) frameHandler(frameQueue *PriorityFrameQueue, newHandler StreamHandler) {
+	for {
+		popFrame := frameQueue.Pop()
+		if popFrame == nil {
+			return
+		}
+
+		var frameErr error
+		switch frame := popFrame.(type) {
+		case *spdy.SynStreamFrame:
+			frameErr = s.handleStreamFrame(frame, newHandler)
+		case *spdy.SynReplyFrame:
+			frameErr = s.handleReplyFrame(frame)
+		case *spdy.DataFrame:
+			frameErr = s.dataFrameHandler(frame)
+		case *spdy.RstStreamFrame:
+			frameErr = s.handleResetFrame(frame)
+		case *spdy.HeadersFrame:
+			frameErr = s.handleHeaderFrame(frame)
+		case *spdy.PingFrame:
+			frameErr = s.handlePingFrame(frame)
+		case *spdy.GoAwayFrame:
+			frameErr = s.handleGoAwayFrame(frame)
+		default:
+			frameErr = fmt.Errorf("unhandled frame type: %T", frame)
+		}
+
+		if frameErr != nil {
+			debugMessage("(%p) Frame handling error: %s", s, frameErr)
+		}
+	}
+}
+
+func (s *Connection) getStreamPriority(streamId spdy.StreamId) uint8 {
+	stream, streamOk := s.getStream(streamId)
+	if !streamOk {
+		return 7
+	}
+	return stream.priority
+}
+
+func (s *Connection) addStreamFrame(frame *spdy.SynStreamFrame) {
+	var parent *Stream
+	if frame.AssociatedToStreamId != spdy.StreamId(0) {
+		parent, _ = s.getStream(frame.AssociatedToStreamId)
+	}
+
+	stream := &Stream{
+		streamId:   frame.StreamId,
+		parent:     parent,
+		conn:       s,
+		startChan:  make(chan error),
+		headers:    frame.Headers,
+		finished:   (frame.CFHeader.Flags & spdy.ControlFlagUnidirectional) != 0x00,
+		replyCond:  sync.NewCond(new(sync.Mutex)),
+		dataChan:   make(chan []byte),
+		headerChan: make(chan http.Header),
+		closeChan:  make(chan bool),
+	}
+	if frame.CFHeader.Flags&spdy.ControlFlagFin != 0x00 {
+		stream.closeRemoteChannels()
+	}
+
+	s.addStream(stream)
+}
+
+// checkStreamFrame checks to see if a stream frame is allowed.
+// If the stream id is invalid, a reset frame with a protocol error
+// is sent back to the peer and false is returned.
+func (s *Connection) checkStreamFrame(frame *spdy.SynStreamFrame) bool {
+	s.receiveIdLock.Lock()
+	defer s.receiveIdLock.Unlock()
+	if s.goneAway {
+		return false
+	}
+	validationErr := s.validateStreamId(frame.StreamId)
+	if validationErr != nil {
+		go func() {
+			resetErr := s.sendResetFrame(spdy.ProtocolError, frame.StreamId)
+			if resetErr != nil {
+				debugMessage("(%p) Reset error: %s", s, resetErr)
+			}
+		}()
+		return false
+	}
+	return true
+}
+
+func (s *Connection) handleStreamFrame(frame *spdy.SynStreamFrame, newHandler StreamHandler) error {
+	stream, ok := s.getStream(frame.StreamId)
+	if !ok {
+		return fmt.Errorf("Missing stream: %d", frame.StreamId)
+	}
+
+	newHandler(stream)
+
+	return nil
+}
+
+func (s *Connection) handleReplyFrame(frame *spdy.SynReplyFrame) error {
+	debugMessage("(%p) Reply frame received for %d", s, frame.StreamId)
+	stream, streamOk := s.getStream(frame.StreamId)
+	if !streamOk {
+		debugMessage("Reply frame gone away for %d", frame.StreamId)
+		// Stream has already gone away
+		return nil
+	}
+	if stream.replied {
+		// Stream has already received reply
+		return nil
+	}
+	stream.replied = true
+
+	// TODO Check for error
+	if (frame.CFHeader.Flags & spdy.ControlFlagFin) != 0x00 {
+		s.remoteStreamFinish(stream)
+	}
+
+	close(stream.startChan)
+
+	return nil
+}
+
+func (s *Connection) handleResetFrame(frame *spdy.RstStreamFrame) error {
+	stream, streamOk := s.getStream(frame.StreamId)
+	if !streamOk {
+		// Stream has already been removed
+		return nil
+	}
+	s.removeStream(stream)
+	stream.closeRemoteChannels()
+
+	if !stream.replied {
+		stream.replied = true
+		stream.startChan <- ErrReset
+		close(stream.startChan)
+	}
+
+	stream.finishLock.Lock()
+	stream.finished = true
+	stream.finishLock.Unlock()
+
+	return nil
+}
+
+func (s *Connection) handleHeaderFrame(frame *spdy.HeadersFrame) error {
+	stream, streamOk := s.getStream(frame.StreamId)
+	if !streamOk {
+		// Stream has already gone away
+		return nil
+	}
+	if !stream.replied {
+		// No reply received...Protocol error?
+		return nil
+	}
+
+	// TODO limit headers while not blocking (use buffered chan or goroutine?)
+	select {
+	case <-stream.closeChan:
+		return nil
+	case stream.headerChan <- frame.Headers:
+	}
+
+	if (frame.CFHeader.Flags & spdy.ControlFlagFin) != 0x00 {
+		s.remoteStreamFinish(stream)
+	}
+
+	return nil
+}
+
+func (s *Connection) handleDataFrame(frame *spdy.DataFrame) error {
+	debugMessage("(%p) Data frame received for %d", s, frame.StreamId)
+	stream, streamOk := s.getStream(frame.StreamId)
+	if !streamOk {
+		debugMessage("(%p) Data frame gone away for %d", s, frame.StreamId)
+		// Stream has already gone away
+		return nil
+	}
+	if !stream.replied {
+		debugMessage("(%p) Data frame not replied %d", s, frame.StreamId)
+		// No reply received...Protocol error?
+		return nil
+	}
+
+	debugMessage("(%p) (%d) Data frame handling", stream, stream.streamId)
+	if len(frame.Data) > 0 {
+		stream.dataLock.RLock()
+		select {
+		case <-stream.closeChan:
+			debugMessage("(%p) (%d) Data frame not sent (stream shut down)", stream, stream.streamId)
+		case stream.dataChan <- frame.Data:
+			debugMessage("(%p) (%d) Data frame sent", stream, stream.streamId)
+		}
+		stream.dataLock.RUnlock()
+	}
+	if (frame.Flags & spdy.DataFlagFin) != 0x00 {
+		s.remoteStreamFinish(stream)
+	}
+	return nil
+}
+
+func (s *Connection) handlePingFrame(frame *spdy.PingFrame) error {
+	if s.pingId&0x01 != frame.Id&0x01 {
+		return s.framer.WriteFrame(frame)
+	}
+	pingChan, pingOk := s.pingChans[frame.Id]
+	if pingOk {
+		close(pingChan)
+	}
+	return nil
+}
+
+func (s *Connection) handleGoAwayFrame(frame *spdy.GoAwayFrame) error {
+	debugMessage("(%p) Go away received", s)
+	s.receiveIdLock.Lock()
+	if s.goneAway {
+		s.receiveIdLock.Unlock()
+		return nil
+	}
+	s.goneAway = true
+	s.receiveIdLock.Unlock()
+
+	if s.lastStreamChan != nil {
+		stream, _ := s.getStream(frame.LastGoodStreamId)
+		go func() {
+			s.lastStreamChan <- stream
+		}()
+	}
+
+	// Do not block frame handler waiting for closure
+	go s.shutdown(s.goAwayTimeout)
+
+	return nil
+}
+
+func (s *Connection) remoteStreamFinish(stream *Stream) {
+	stream.closeRemoteChannels()
+
+	stream.finishLock.Lock()
+	if stream.finished {
+		// Stream is fully closed, cleanup
+		s.removeStream(stream)
+	}
+	stream.finishLock.Unlock()
+}
+
+// CreateStream creates a new spdy stream using the parameters for
+// creating the stream frame.  The stream frame will be sent upon
+// calling this function; however, this function does not wait for
+// the reply frame.  If waiting for the reply is desired, use
+// the stream Wait or WaitTimeout function on the stream returned
+// by this function.
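+//
+// A typical call (a sketch; the conn variable is a *Connection):
+//
+//	stream, err := conn.CreateStream(http.Header{}, nil, false)
+//	if err != nil {
+//		// handle error
+//	}
+//	stream.Wait() // block until the reply frame arrives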
+func (s *Connection) CreateStream(headers http.Header, parent *Stream, fin bool) (*Stream, error) {
+	// MUST synchronize stream creation (all the way to writing the frame)
+	// as stream IDs **MUST** increase monotonically.
+	s.nextIdLock.Lock()
+	defer s.nextIdLock.Unlock()
+
+	streamId := s.getNextStreamId()
+	if streamId == 0 {
+		return nil, fmt.Errorf("Unable to get new stream id")
+	}
+
+	stream := &Stream{
+		streamId:   streamId,
+		parent:     parent,
+		conn:       s,
+		startChan:  make(chan error),
+		headers:    headers,
+		dataChan:   make(chan []byte),
+		headerChan: make(chan http.Header),
+		closeChan:  make(chan bool),
+	}
+
+	debugMessage("(%p) (%p) Create stream", s, stream)
+
+	s.addStream(stream)
+
+	return stream, s.sendStream(stream, fin)
+}
+
+func (s *Connection) shutdown(closeTimeout time.Duration) {
+	// TODO Ensure this isn't called multiple times
+	s.shutdownLock.Lock()
+	if s.hasShutdown {
+		s.shutdownLock.Unlock()
+		return
+	}
+	s.hasShutdown = true
+	s.shutdownLock.Unlock()
+
+	var timeout <-chan time.Time
+	if closeTimeout > time.Duration(0) {
+		timeout = time.After(closeTimeout)
+	}
+	streamsClosed := make(chan bool)
+
+	go func() {
+		s.streamCond.L.Lock()
+		for len(s.streams) > 0 {
+			debugMessage("Streams opened: %d, %#v", len(s.streams), s.streams)
+			s.streamCond.Wait()
+		}
+		s.streamCond.L.Unlock()
+		close(streamsClosed)
+	}()
+
+	var err error
+	select {
+	case <-streamsClosed:
+		// No active streams, close should be safe
+		err = s.conn.Close()
+	case <-timeout:
+		// Force ungraceful close
+		err = s.conn.Close()
+		// Wait for cleanup to clear active streams
+		<-streamsClosed
+	}
+
+	if err != nil {
+		duration := 10 * time.Minute
+		time.AfterFunc(duration, func() {
+			select {
+			case err, ok := <-s.shutdownChan:
+				if ok {
+					debugMessage("Unhandled close error after %s: %s", duration, err)
+				}
+			default:
+			}
+		})
+		s.shutdownChan <- err
+	}
+	close(s.shutdownChan)
+
+	return
+}
+
+// Close closes the spdy connection by sending a GoAway frame and initiating shutdown
+func (s *Connection) Close() error {
+	s.receiveIdLock.Lock()
+	if s.goneAway {
+		s.receiveIdLock.Unlock()
+		return nil
+	}
+	s.goneAway = true
+	s.receiveIdLock.Unlock()
+
+	var lastStreamId spdy.StreamId
+	if s.receivedStreamId > 2 {
+		lastStreamId = s.receivedStreamId - 2
+	}
+
+	goAwayFrame := &spdy.GoAwayFrame{
+		LastGoodStreamId: lastStreamId,
+		Status:           spdy.GoAwayOK,
+	}
+
+	err := s.framer.WriteFrame(goAwayFrame)
+	if err != nil {
+		return err
+	}
+
+	go s.shutdown(s.closeTimeout)
+
+	return nil
+}
+
+// CloseWait closes the connection and waits for shutdown
+// to finish.  Note the underlying network connection
+// is not closed until the end of shutdown.
+func (s *Connection) CloseWait() error {
+	closeErr := s.Close()
+	if closeErr != nil {
+		return closeErr
+	}
+	shutdownErr, ok := <-s.shutdownChan
+	if ok {
+		return shutdownErr
+	}
+	return nil
+}
+
+// Wait waits for the connection to finish shutdown or for
+// the wait timeout duration to expire.  This needs to be
+// called either after Close has been called or the GOAWAY frame
+// has been received.  If the wait timeout is 0, this function
+// will block until shutdown finishes.  If wait is never called
+// and a shutdown error occurs, that error will be logged as an
+// unhandled error.
+func (s *Connection) Wait(waitTimeout time.Duration) error {
+	var timeout <-chan time.Time
+	if waitTimeout > time.Duration(0) {
+		timeout = time.After(waitTimeout)
+	}
+
+	select {
+	case err, ok := <-s.shutdownChan:
+		if ok {
+			return err
+		}
+	case <-timeout:
+		return ErrTimeout
+	}
+	return nil
+}
+
+// NotifyClose registers a channel to be notified when the remote
+// peer indicates connection closure.  The last stream to be
+// received by the remote will be sent on the channel.  The notify
+// timeout will determine the duration between go away received
+// and the connection being closed.
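+//
+// A typical registration (a sketch; conn is a *Connection):
+//
+//	lastStream := make(chan *Stream, 1)
+//	conn.NotifyClose(lastStream, 30*time.Second)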
+func (s *Connection) NotifyClose(c chan<- *Stream, timeout time.Duration) {
+	s.goAwayTimeout = timeout
+	s.lastStreamChan = c
+}
+
+// SetCloseTimeout sets the amount of time close will wait for
+// streams to finish before terminating the underlying network
+// connection.  Setting the timeout to 0 will cause close to
+// wait forever, which is the default.
+func (s *Connection) SetCloseTimeout(timeout time.Duration) {
+	s.closeTimeout = timeout
+}
+
+// SetIdleTimeout sets the amount of time the connection may sit idle before
+// it is forcefully terminated.
+func (s *Connection) SetIdleTimeout(timeout time.Duration) {
+	s.framer.setIdleTimeout(timeout)
+}
+
+func (s *Connection) sendHeaders(headers http.Header, stream *Stream, fin bool) error {
+	var flags spdy.ControlFlags
+	if fin {
+		flags = spdy.ControlFlagFin
+	}
+
+	headerFrame := &spdy.HeadersFrame{
+		StreamId: stream.streamId,
+		Headers:  headers,
+		CFHeader: spdy.ControlFrameHeader{Flags: flags},
+	}
+
+	return s.framer.WriteFrame(headerFrame)
+}
+
+func (s *Connection) sendReply(headers http.Header, stream *Stream, fin bool) error {
+	var flags spdy.ControlFlags
+	if fin {
+		flags = spdy.ControlFlagFin
+	}
+
+	replyFrame := &spdy.SynReplyFrame{
+		StreamId: stream.streamId,
+		Headers:  headers,
+		CFHeader: spdy.ControlFrameHeader{Flags: flags},
+	}
+
+	return s.framer.WriteFrame(replyFrame)
+}
+
+func (s *Connection) sendResetFrame(status spdy.RstStreamStatus, streamId spdy.StreamId) error {
+	resetFrame := &spdy.RstStreamFrame{
+		StreamId: streamId,
+		Status:   status,
+	}
+
+	return s.framer.WriteFrame(resetFrame)
+}
+
+func (s *Connection) sendReset(status spdy.RstStreamStatus, stream *Stream) error {
+	return s.sendResetFrame(status, stream.streamId)
+}
+
+func (s *Connection) sendStream(stream *Stream, fin bool) error {
+	var flags spdy.ControlFlags
+	if fin {
+		flags = spdy.ControlFlagFin
+		stream.finished = true
+	}
+
+	var parentId spdy.StreamId
+	if stream.parent != nil {
+		parentId = stream.parent.streamId
+	}
+
+	streamFrame := &spdy.SynStreamFrame{
+		StreamId:             spdy.StreamId(stream.streamId),
+		AssociatedToStreamId: spdy.StreamId(parentId),
+		Headers:              stream.headers,
+		CFHeader:             spdy.ControlFrameHeader{Flags: flags},
+	}
+
+	return s.framer.WriteFrame(streamFrame)
+}
+
+// getNextStreamId returns the next sequential id;
+// every call produces a unique value, or 0 once the id space is exhausted
+func (s *Connection) getNextStreamId() spdy.StreamId {
+	sid := s.nextStreamId
+	if sid > 0x7fffffff {
+		return 0
+	}
+	s.nextStreamId = s.nextStreamId + 2
+	return sid
+}
+
+// PeekNextStreamId returns the next sequential id and keeps the next id untouched
+func (s *Connection) PeekNextStreamId() spdy.StreamId {
+	sid := s.nextStreamId
+	return sid
+}
+
+func (s *Connection) validateStreamId(rid spdy.StreamId) error {
+	if rid > 0x7fffffff || rid < s.receivedStreamId {
+		return ErrInvalidStreamId
+	}
+	s.receivedStreamId = rid + 2
+	return nil
+}
+
+func (s *Connection) addStream(stream *Stream) {
+	s.streamCond.L.Lock()
+	s.streams[stream.streamId] = stream
+	debugMessage("(%p) (%p) Stream added, broadcasting: %d", s, stream, stream.streamId)
+	s.streamCond.Broadcast()
+	s.streamCond.L.Unlock()
+}
+
+func (s *Connection) removeStream(stream *Stream) {
+	s.streamCond.L.Lock()
+	delete(s.streams, stream.streamId)
+	debugMessage("(%p) (%p) Stream removed, broadcasting: %d", s, stream, stream.streamId)
+	s.streamCond.Broadcast()
+	s.streamCond.L.Unlock()
+}
+
+func (s *Connection) getStream(streamId spdy.StreamId) (stream *Stream, ok bool) {
+	s.streamLock.RLock()
+	stream, ok = s.streams[streamId]
+	s.streamLock.RUnlock()
+	return
+}
+
+// FindStream looks up the given stream id and either waits for the
+// stream to be found or returns nil if the stream id is no longer
+// valid.
+func (s *Connection) FindStream(streamId uint32) *Stream {
+	var stream *Stream
+	var ok bool
+	s.streamCond.L.Lock()
+	stream, ok = s.streams[spdy.StreamId(streamId)]
+	debugMessage("(%p) Found stream %d? %t", s, spdy.StreamId(streamId), ok)
+	for !ok && streamId >= uint32(s.receivedStreamId) {
+		s.streamCond.Wait()
+		stream, ok = s.streams[spdy.StreamId(streamId)]
+	}
+	s.streamCond.L.Unlock()
+	return stream
+}
+
+func (s *Connection) CloseChan() <-chan bool {
+	return s.closeChan
+}
diff --git a/vendor/github.com/docker/spdystream/handlers.go b/vendor/github.com/docker/spdystream/handlers.go
new file mode 100644
index 00000000..b59fa5fd
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/handlers.go
@@ -0,0 +1,38 @@
+package spdystream
+
+import (
+	"io"
+	"net/http"
+)
+
+// MirrorStreamHandler mirrors all streams.
+func MirrorStreamHandler(stream *Stream) {
+	replyErr := stream.SendReply(http.Header{}, false)
+	if replyErr != nil {
+		return
+	}
+
+	go func() {
+		io.Copy(stream, stream)
+		stream.Close()
+	}()
+	go func() {
+		for {
+			header, receiveErr := stream.ReceiveHeader()
+			if receiveErr != nil {
+				return
+			}
+			sendErr := stream.SendHeader(header, false)
+			if sendErr != nil {
+				return
+			}
+		}
+	}()
+}
+
+// NoOpStreamHandler does nothing when a stream connects. It is most
+// likely used with RejectAuthHandler, which will not allow any
+// streams to make it to the stream handler.
+func NoOpStreamHandler(stream *Stream) {
+	stream.SendReply(http.Header{}, false)
+}
diff --git a/vendor/github.com/docker/spdystream/priority.go b/vendor/github.com/docker/spdystream/priority.go
new file mode 100644
index 00000000..fc8582b5
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/priority.go
@@ -0,0 +1,98 @@
+package spdystream
+
+import (
+	"container/heap"
+	"sync"
+
+	"github.com/docker/spdystream/spdy"
+)
+
+type prioritizedFrame struct {
+	frame    spdy.Frame
+	priority uint8
+	insertId uint64
+}
+
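+// frameQueue implements heap.Interface over prioritizedFrames, ordering
+// frames by priority (lower values first) and then by insertion order.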
+type frameQueue []*prioritizedFrame
+
+func (fq frameQueue) Len() int {
+	return len(fq)
+}
+
+func (fq frameQueue) Less(i, j int) bool {
+	if fq[i].priority == fq[j].priority {
+		return fq[i].insertId < fq[j].insertId
+	}
+	return fq[i].priority < fq[j].priority
+}
+
+func (fq frameQueue) Swap(i, j int) {
+	fq[i], fq[j] = fq[j], fq[i]
+}
+
+func (fq *frameQueue) Push(x interface{}) {
+	*fq = append(*fq, x.(*prioritizedFrame))
+}
+
+func (fq *frameQueue) Pop() interface{} {
+	old := *fq
+	n := len(old)
+	*fq = old[0 : n-1]
+	return old[n-1]
+}
+
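+// PriorityFrameQueue is a bounded, priority-ordered frame queue. Push blocks
+// while the queue is full; Pop blocks while it is empty and returns nil once
+// Drain has been called and the queue has been emptied.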
+type PriorityFrameQueue struct {
+	queue        *frameQueue
+	c            *sync.Cond
+	size         int
+	nextInsertId uint64
+	drain        bool
+}
+
+func NewPriorityFrameQueue(size int) *PriorityFrameQueue {
+	queue := make(frameQueue, 0, size)
+	heap.Init(&queue)
+
+	return &PriorityFrameQueue{
+		queue: &queue,
+		size:  size,
+		c:     sync.NewCond(&sync.Mutex{}),
+	}
+}
+
+func (q *PriorityFrameQueue) Push(frame spdy.Frame, priority uint8) {
+	q.c.L.Lock()
+	defer q.c.L.Unlock()
+	for q.queue.Len() >= q.size {
+		q.c.Wait()
+	}
+	pFrame := &prioritizedFrame{
+		frame:    frame,
+		priority: priority,
+		insertId: q.nextInsertId,
+	}
+	q.nextInsertId = q.nextInsertId + 1
+	heap.Push(q.queue, pFrame)
+	q.c.Signal()
+}
+
+func (q *PriorityFrameQueue) Pop() spdy.Frame {
+	q.c.L.Lock()
+	defer q.c.L.Unlock()
+	for q.queue.Len() == 0 {
+		if q.drain {
+			return nil
+		}
+		q.c.Wait()
+	}
+	frame := heap.Pop(q.queue).(*prioritizedFrame).frame
+	q.c.Signal()
+	return frame
+}
+
+func (q *PriorityFrameQueue) Drain() {
+	q.c.L.Lock()
+	defer q.c.L.Unlock()
+	q.drain = true
+	q.c.Broadcast()
+}
diff --git a/vendor/github.com/docker/spdystream/spdy/dictionary.go b/vendor/github.com/docker/spdystream/spdy/dictionary.go
new file mode 100644
index 00000000..5a5ff0e1
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/spdy/dictionary.go
@@ -0,0 +1,187 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package spdy
+
+// headerDictionary is the dictionary sent to the zlib compressor/decompressor.
+var headerDictionary = []byte{
+	0x00, 0x00, 0x00, 0x07, 0x6f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x00, 0x00, 0x00, 0x04, 0x68,
+	0x65, 0x61, 0x64, 0x00, 0x00, 0x00, 0x04, 0x70,
+	0x6f, 0x73, 0x74, 0x00, 0x00, 0x00, 0x03, 0x70,
+	0x75, 0x74, 0x00, 0x00, 0x00, 0x06, 0x64, 0x65,
+	0x6c, 0x65, 0x74, 0x65, 0x00, 0x00, 0x00, 0x05,
+	0x74, 0x72, 0x61, 0x63, 0x65, 0x00, 0x00, 0x00,
+	0x06, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x00,
+	0x00, 0x00, 0x0e, 0x61, 0x63, 0x63, 0x65, 0x70,
+	0x74, 0x2d, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65,
+	0x74, 0x00, 0x00, 0x00, 0x0f, 0x61, 0x63, 0x63,
+	0x65, 0x70, 0x74, 0x2d, 0x65, 0x6e, 0x63, 0x6f,
+	0x64, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x0f,
+	0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x2d, 0x6c,
+	0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x00,
+	0x00, 0x00, 0x0d, 0x61, 0x63, 0x63, 0x65, 0x70,
+	0x74, 0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73,
+	0x00, 0x00, 0x00, 0x03, 0x61, 0x67, 0x65, 0x00,
+	0x00, 0x00, 0x05, 0x61, 0x6c, 0x6c, 0x6f, 0x77,
+	0x00, 0x00, 0x00, 0x0d, 0x61, 0x75, 0x74, 0x68,
+	0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
+	0x6e, 0x00, 0x00, 0x00, 0x0d, 0x63, 0x61, 0x63,
+	0x68, 0x65, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72,
+	0x6f, 0x6c, 0x00, 0x00, 0x00, 0x0a, 0x63, 0x6f,
+	0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+	0x00, 0x00, 0x00, 0x0c, 0x63, 0x6f, 0x6e, 0x74,
+	0x65, 0x6e, 0x74, 0x2d, 0x62, 0x61, 0x73, 0x65,
+	0x00, 0x00, 0x00, 0x10, 0x63, 0x6f, 0x6e, 0x74,
+	0x65, 0x6e, 0x74, 0x2d, 0x65, 0x6e, 0x63, 0x6f,
+	0x64, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x10,
+	0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d,
+	0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65,
+	0x00, 0x00, 0x00, 0x0e, 0x63, 0x6f, 0x6e, 0x74,
+	0x65, 0x6e, 0x74, 0x2d, 0x6c, 0x65, 0x6e, 0x67,
+	0x74, 0x68, 0x00, 0x00, 0x00, 0x10, 0x63, 0x6f,
+	0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x6c, 0x6f,
+	0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00,
+	0x00, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e,
+	0x74, 0x2d, 0x6d, 0x64, 0x35, 0x00, 0x00, 0x00,
+	0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74,
+	0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x00, 0x00,
+	0x00, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e,
+	0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x00, 0x00,
+	0x00, 0x04, 0x64, 0x61, 0x74, 0x65, 0x00, 0x00,
+	0x00, 0x04, 0x65, 0x74, 0x61, 0x67, 0x00, 0x00,
+	0x00, 0x06, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74,
+	0x00, 0x00, 0x00, 0x07, 0x65, 0x78, 0x70, 0x69,
+	0x72, 0x65, 0x73, 0x00, 0x00, 0x00, 0x04, 0x66,
+	0x72, 0x6f, 0x6d, 0x00, 0x00, 0x00, 0x04, 0x68,
+	0x6f, 0x73, 0x74, 0x00, 0x00, 0x00, 0x08, 0x69,
+	0x66, 0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x00,
+	0x00, 0x00, 0x11, 0x69, 0x66, 0x2d, 0x6d, 0x6f,
+	0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x2d, 0x73,
+	0x69, 0x6e, 0x63, 0x65, 0x00, 0x00, 0x00, 0x0d,
+	0x69, 0x66, 0x2d, 0x6e, 0x6f, 0x6e, 0x65, 0x2d,
+	0x6d, 0x61, 0x74, 0x63, 0x68, 0x00, 0x00, 0x00,
+	0x08, 0x69, 0x66, 0x2d, 0x72, 0x61, 0x6e, 0x67,
+	0x65, 0x00, 0x00, 0x00, 0x13, 0x69, 0x66, 0x2d,
+	0x75, 0x6e, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69,
+	0x65, 0x64, 0x2d, 0x73, 0x69, 0x6e, 0x63, 0x65,
+	0x00, 0x00, 0x00, 0x0d, 0x6c, 0x61, 0x73, 0x74,
+	0x2d, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65,
+	0x64, 0x00, 0x00, 0x00, 0x08, 0x6c, 0x6f, 0x63,
+	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00,
+	0x0c, 0x6d, 0x61, 0x78, 0x2d, 0x66, 0x6f, 0x72,
+	0x77, 0x61, 0x72, 0x64, 0x73, 0x00, 0x00, 0x00,
+	0x06, 0x70, 0x72, 0x61, 0x67, 0x6d, 0x61, 0x00,
+	0x00, 0x00, 0x12, 0x70, 0x72, 0x6f, 0x78, 0x79,
+	0x2d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74,
+	0x69, 0x63, 0x61, 0x74, 0x65, 0x00, 0x00, 0x00,
+	0x13, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2d, 0x61,
+	0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61,
+	0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00, 0x05,
+	0x72, 0x61, 0x6e, 0x67, 0x65, 0x00, 0x00, 0x00,
+	0x07, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x72,
+	0x00, 0x00, 0x00, 0x0b, 0x72, 0x65, 0x74, 0x72,
+	0x79, 0x2d, 0x61, 0x66, 0x74, 0x65, 0x72, 0x00,
+	0x00, 0x00, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65,
+	0x72, 0x00, 0x00, 0x00, 0x02, 0x74, 0x65, 0x00,
+	0x00, 0x00, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c,
+	0x65, 0x72, 0x00, 0x00, 0x00, 0x11, 0x74, 0x72,
+	0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2d, 0x65,
+	0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x00,
+	0x00, 0x00, 0x07, 0x75, 0x70, 0x67, 0x72, 0x61,
+	0x64, 0x65, 0x00, 0x00, 0x00, 0x0a, 0x75, 0x73,
+	0x65, 0x72, 0x2d, 0x61, 0x67, 0x65, 0x6e, 0x74,
+	0x00, 0x00, 0x00, 0x04, 0x76, 0x61, 0x72, 0x79,
+	0x00, 0x00, 0x00, 0x03, 0x76, 0x69, 0x61, 0x00,
+	0x00, 0x00, 0x07, 0x77, 0x61, 0x72, 0x6e, 0x69,
+	0x6e, 0x67, 0x00, 0x00, 0x00, 0x10, 0x77, 0x77,
+	0x77, 0x2d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e,
+	0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x00, 0x00,
+	0x00, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64,
+	0x00, 0x00, 0x00, 0x03, 0x67, 0x65, 0x74, 0x00,
+	0x00, 0x00, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75,
+	0x73, 0x00, 0x00, 0x00, 0x06, 0x32, 0x30, 0x30,
+	0x20, 0x4f, 0x4b, 0x00, 0x00, 0x00, 0x07, 0x76,
+	0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x00, 0x00,
+	0x00, 0x08, 0x48, 0x54, 0x54, 0x50, 0x2f, 0x31,
+	0x2e, 0x31, 0x00, 0x00, 0x00, 0x03, 0x75, 0x72,
+	0x6c, 0x00, 0x00, 0x00, 0x06, 0x70, 0x75, 0x62,
+	0x6c, 0x69, 0x63, 0x00, 0x00, 0x00, 0x0a, 0x73,
+	0x65, 0x74, 0x2d, 0x63, 0x6f, 0x6f, 0x6b, 0x69,
+	0x65, 0x00, 0x00, 0x00, 0x0a, 0x6b, 0x65, 0x65,
+	0x70, 0x2d, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x00,
+	0x00, 0x00, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69,
+	0x6e, 0x31, 0x30, 0x30, 0x31, 0x30, 0x31, 0x32,
+	0x30, 0x31, 0x32, 0x30, 0x32, 0x32, 0x30, 0x35,
+	0x32, 0x30, 0x36, 0x33, 0x30, 0x30, 0x33, 0x30,
+	0x32, 0x33, 0x30, 0x33, 0x33, 0x30, 0x34, 0x33,
+	0x30, 0x35, 0x33, 0x30, 0x36, 0x33, 0x30, 0x37,
+	0x34, 0x30, 0x32, 0x34, 0x30, 0x35, 0x34, 0x30,
+	0x36, 0x34, 0x30, 0x37, 0x34, 0x30, 0x38, 0x34,
+	0x30, 0x39, 0x34, 0x31, 0x30, 0x34, 0x31, 0x31,
+	0x34, 0x31, 0x32, 0x34, 0x31, 0x33, 0x34, 0x31,
+	0x34, 0x34, 0x31, 0x35, 0x34, 0x31, 0x36, 0x34,
+	0x31, 0x37, 0x35, 0x30, 0x32, 0x35, 0x30, 0x34,
+	0x35, 0x30, 0x35, 0x32, 0x30, 0x33, 0x20, 0x4e,
+	0x6f, 0x6e, 0x2d, 0x41, 0x75, 0x74, 0x68, 0x6f,
+	0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65,
+	0x20, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61,
+	0x74, 0x69, 0x6f, 0x6e, 0x32, 0x30, 0x34, 0x20,
+	0x4e, 0x6f, 0x20, 0x43, 0x6f, 0x6e, 0x74, 0x65,
+	0x6e, 0x74, 0x33, 0x30, 0x31, 0x20, 0x4d, 0x6f,
+	0x76, 0x65, 0x64, 0x20, 0x50, 0x65, 0x72, 0x6d,
+	0x61, 0x6e, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x34,
+	0x30, 0x30, 0x20, 0x42, 0x61, 0x64, 0x20, 0x52,
+	0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x34, 0x30,
+	0x31, 0x20, 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68,
+	0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x34, 0x30,
+	0x33, 0x20, 0x46, 0x6f, 0x72, 0x62, 0x69, 0x64,
+	0x64, 0x65, 0x6e, 0x34, 0x30, 0x34, 0x20, 0x4e,
+	0x6f, 0x74, 0x20, 0x46, 0x6f, 0x75, 0x6e, 0x64,
+	0x35, 0x30, 0x30, 0x20, 0x49, 0x6e, 0x74, 0x65,
+	0x72, 0x6e, 0x61, 0x6c, 0x20, 0x53, 0x65, 0x72,
+	0x76, 0x65, 0x72, 0x20, 0x45, 0x72, 0x72, 0x6f,
+	0x72, 0x35, 0x30, 0x31, 0x20, 0x4e, 0x6f, 0x74,
+	0x20, 0x49, 0x6d, 0x70, 0x6c, 0x65, 0x6d, 0x65,
+	0x6e, 0x74, 0x65, 0x64, 0x35, 0x30, 0x33, 0x20,
+	0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x20,
+	0x55, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61,
+	0x62, 0x6c, 0x65, 0x4a, 0x61, 0x6e, 0x20, 0x46,
+	0x65, 0x62, 0x20, 0x4d, 0x61, 0x72, 0x20, 0x41,
+	0x70, 0x72, 0x20, 0x4d, 0x61, 0x79, 0x20, 0x4a,
+	0x75, 0x6e, 0x20, 0x4a, 0x75, 0x6c, 0x20, 0x41,
+	0x75, 0x67, 0x20, 0x53, 0x65, 0x70, 0x74, 0x20,
+	0x4f, 0x63, 0x74, 0x20, 0x4e, 0x6f, 0x76, 0x20,
+	0x44, 0x65, 0x63, 0x20, 0x30, 0x30, 0x3a, 0x30,
+	0x30, 0x3a, 0x30, 0x30, 0x20, 0x4d, 0x6f, 0x6e,
+	0x2c, 0x20, 0x54, 0x75, 0x65, 0x2c, 0x20, 0x57,
+	0x65, 0x64, 0x2c, 0x20, 0x54, 0x68, 0x75, 0x2c,
+	0x20, 0x46, 0x72, 0x69, 0x2c, 0x20, 0x53, 0x61,
+	0x74, 0x2c, 0x20, 0x53, 0x75, 0x6e, 0x2c, 0x20,
+	0x47, 0x4d, 0x54, 0x63, 0x68, 0x75, 0x6e, 0x6b,
+	0x65, 0x64, 0x2c, 0x74, 0x65, 0x78, 0x74, 0x2f,
+	0x68, 0x74, 0x6d, 0x6c, 0x2c, 0x69, 0x6d, 0x61,
+	0x67, 0x65, 0x2f, 0x70, 0x6e, 0x67, 0x2c, 0x69,
+	0x6d, 0x61, 0x67, 0x65, 0x2f, 0x6a, 0x70, 0x67,
+	0x2c, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x2f, 0x67,
+	0x69, 0x66, 0x2c, 0x61, 0x70, 0x70, 0x6c, 0x69,
+	0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78,
+	0x6d, 0x6c, 0x2c, 0x61, 0x70, 0x70, 0x6c, 0x69,
+	0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78,
+	0x68, 0x74, 0x6d, 0x6c, 0x2b, 0x78, 0x6d, 0x6c,
+	0x2c, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x70, 0x6c,
+	0x61, 0x69, 0x6e, 0x2c, 0x74, 0x65, 0x78, 0x74,
+	0x2f, 0x6a, 0x61, 0x76, 0x61, 0x73, 0x63, 0x72,
+	0x69, 0x70, 0x74, 0x2c, 0x70, 0x75, 0x62, 0x6c,
+	0x69, 0x63, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74,
+	0x65, 0x6d, 0x61, 0x78, 0x2d, 0x61, 0x67, 0x65,
+	0x3d, 0x67, 0x7a, 0x69, 0x70, 0x2c, 0x64, 0x65,
+	0x66, 0x6c, 0x61, 0x74, 0x65, 0x2c, 0x73, 0x64,
+	0x63, 0x68, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65,
+	0x74, 0x3d, 0x75, 0x74, 0x66, 0x2d, 0x38, 0x63,
+	0x68, 0x61, 0x72, 0x73, 0x65, 0x74, 0x3d, 0x69,
+	0x73, 0x6f, 0x2d, 0x38, 0x38, 0x35, 0x39, 0x2d,
+	0x31, 0x2c, 0x75, 0x74, 0x66, 0x2d, 0x2c, 0x2a,
+	0x2c, 0x65, 0x6e, 0x71, 0x3d, 0x30, 0x2e,
+}
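The byte array above is the SPDY/3 preset dictionary that both peers feed to zlib so header blocks compress well from the very first frame. A self-contained sketch of the round trip (the helper name is an assumption; the real Framer keeps the stream open and flushes per frame instead of closing the writer):

```go
package spdy

import (
	"bytes"
	"compress/zlib"
	"io/ioutil"
)

// roundTrip is illustrative only: both sides must prime zlib with the same
// headerDictionary, otherwise the compressed header block cannot be decoded.
func roundTrip(headerBlock []byte) ([]byte, error) {
	var buf bytes.Buffer
	w, err := zlib.NewWriterLevelDict(&buf, zlib.BestCompression, headerDictionary)
	if err != nil {
		return nil, err
	}
	if _, err := w.Write(headerBlock); err != nil {
		return nil, err
	}
	w.Close() // terminate the stream for this sketch; the Framer flushes per frame

	r, err := zlib.NewReaderDict(&buf, headerDictionary)
	if err != nil {
		return nil, err
	}
	defer r.Close()
	return ioutil.ReadAll(r)
}
```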
diff --git a/vendor/github.com/docker/spdystream/spdy/read.go b/vendor/github.com/docker/spdystream/spdy/read.go
new file mode 100644
index 00000000..9359a950
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/spdy/read.go
@@ -0,0 +1,348 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package spdy
+
+import (
+	"compress/zlib"
+	"encoding/binary"
+	"io"
+	"net/http"
+	"strings"
+)
+
+func (frame *SynStreamFrame) read(h ControlFrameHeader, f *Framer) error {
+	return f.readSynStreamFrame(h, frame)
+}
+
+func (frame *SynReplyFrame) read(h ControlFrameHeader, f *Framer) error {
+	return f.readSynReplyFrame(h, frame)
+}
+
+func (frame *RstStreamFrame) read(h ControlFrameHeader, f *Framer) error {
+	frame.CFHeader = h
+	if err := binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
+		return err
+	}
+	if err := binary.Read(f.r, binary.BigEndian, &frame.Status); err != nil {
+		return err
+	}
+	if frame.Status == 0 {
+		return &Error{InvalidControlFrame, frame.StreamId}
+	}
+	if frame.StreamId == 0 {
+		return &Error{ZeroStreamId, 0}
+	}
+	return nil
+}
+
+func (frame *SettingsFrame) read(h ControlFrameHeader, f *Framer) error {
+	frame.CFHeader = h
+	var numSettings uint32
+	if err := binary.Read(f.r, binary.BigEndian, &numSettings); err != nil {
+		return err
+	}
+	frame.FlagIdValues = make([]SettingsFlagIdValue, numSettings)
+	for i := uint32(0); i < numSettings; i++ {
+		if err := binary.Read(f.r, binary.BigEndian, &frame.FlagIdValues[i].Id); err != nil {
+			return err
+		}
+		frame.FlagIdValues[i].Flag = SettingsFlag((frame.FlagIdValues[i].Id & 0xff000000) >> 24)
+		frame.FlagIdValues[i].Id &= 0xffffff
+		if err := binary.Read(f.r, binary.BigEndian, &frame.FlagIdValues[i].Value); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (frame *PingFrame) read(h ControlFrameHeader, f *Framer) error {
+	frame.CFHeader = h
+	if err := binary.Read(f.r, binary.BigEndian, &frame.Id); err != nil {
+		return err
+	}
+	if frame.Id == 0 {
+		return &Error{ZeroStreamId, 0}
+	}
+	if frame.CFHeader.Flags != 0 {
+		return &Error{InvalidControlFrame, StreamId(frame.Id)}
+	}
+	return nil
+}
+
+func (frame *GoAwayFrame) read(h ControlFrameHeader, f *Framer) error {
+	frame.CFHeader = h
+	if err := binary.Read(f.r, binary.BigEndian, &frame.LastGoodStreamId); err != nil {
+		return err
+	}
+	if frame.CFHeader.Flags != 0 {
+		return &Error{InvalidControlFrame, frame.LastGoodStreamId}
+	}
+	if frame.CFHeader.length != 8 {
+		return &Error{InvalidControlFrame, frame.LastGoodStreamId}
+	}
+	if err := binary.Read(f.r, binary.BigEndian, &frame.Status); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (frame *HeadersFrame) read(h ControlFrameHeader, f *Framer) error {
+	return f.readHeadersFrame(h, frame)
+}
+
+func (frame *WindowUpdateFrame) read(h ControlFrameHeader, f *Framer) error {
+	frame.CFHeader = h
+	if err := binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
+		return err
+	}
+	if frame.CFHeader.Flags != 0 {
+		return &Error{InvalidControlFrame, frame.StreamId}
+	}
+	if frame.CFHeader.length != 8 {
+		return &Error{InvalidControlFrame, frame.StreamId}
+	}
+	if err := binary.Read(f.r, binary.BigEndian, &frame.DeltaWindowSize); err != nil {
+		return err
+	}
+	return nil
+}
+
+func newControlFrame(frameType ControlFrameType) (controlFrame, error) {
+	ctor, ok := cframeCtor[frameType]
+	if !ok {
+		return nil, &Error{Err: InvalidControlFrame}
+	}
+	return ctor(), nil
+}
+
+var cframeCtor = map[ControlFrameType]func() controlFrame{
+	TypeSynStream:    func() controlFrame { return new(SynStreamFrame) },
+	TypeSynReply:     func() controlFrame { return new(SynReplyFrame) },
+	TypeRstStream:    func() controlFrame { return new(RstStreamFrame) },
+	TypeSettings:     func() controlFrame { return new(SettingsFrame) },
+	TypePing:         func() controlFrame { return new(PingFrame) },
+	TypeGoAway:       func() controlFrame { return new(GoAwayFrame) },
+	TypeHeaders:      func() controlFrame { return new(HeadersFrame) },
+	TypeWindowUpdate: func() controlFrame { return new(WindowUpdateFrame) },
+}
+
+func (f *Framer) uncorkHeaderDecompressor(payloadSize int64) error {
+	if f.headerDecompressor != nil {
+		f.headerReader.N = payloadSize
+		return nil
+	}
+	f.headerReader = io.LimitedReader{R: f.r, N: payloadSize}
+	decompressor, err := zlib.NewReaderDict(&f.headerReader, []byte(headerDictionary))
+	if err != nil {
+		return err
+	}
+	f.headerDecompressor = decompressor
+	return nil
+}
+
+// ReadFrame reads SPDY encoded data and returns a decompressed Frame.
+func (f *Framer) ReadFrame() (Frame, error) {
+	var firstWord uint32
+	if err := binary.Read(f.r, binary.BigEndian, &firstWord); err != nil {
+		return nil, err
+	}
+	if firstWord&0x80000000 != 0 {
+		frameType := ControlFrameType(firstWord & 0xffff)
+		version := uint16(firstWord >> 16 & 0x7fff)
+		return f.parseControlFrame(version, frameType)
+	}
+	return f.parseDataFrame(StreamId(firstWord & 0x7fffffff))
+}
+
+func (f *Framer) parseControlFrame(version uint16, frameType ControlFrameType) (Frame, error) {
+	var length uint32
+	if err := binary.Read(f.r, binary.BigEndian, &length); err != nil {
+		return nil, err
+	}
+	flags := ControlFlags((length & 0xff000000) >> 24)
+	length &= 0xffffff
+	header := ControlFrameHeader{version, frameType, flags, length}
+	cframe, err := newControlFrame(frameType)
+	if err != nil {
+		return nil, err
+	}
+	if err = cframe.read(header, f); err != nil {
+		return nil, err
+	}
+	return cframe, nil
+}
+
+func parseHeaderValueBlock(r io.Reader, streamId StreamId) (http.Header, error) {
+	var numHeaders uint32
+	if err := binary.Read(r, binary.BigEndian, &numHeaders); err != nil {
+		return nil, err
+	}
+	var e error
+	h := make(http.Header, int(numHeaders))
+	for i := 0; i < int(numHeaders); i++ {
+		var length uint32
+		if err := binary.Read(r, binary.BigEndian, &length); err != nil {
+			return nil, err
+		}
+		nameBytes := make([]byte, length)
+		if _, err := io.ReadFull(r, nameBytes); err != nil {
+			return nil, err
+		}
+		name := string(nameBytes)
+		if name != strings.ToLower(name) {
+			e = &Error{UnlowercasedHeaderName, streamId}
+			name = strings.ToLower(name)
+		}
+		if h[name] != nil {
+			e = &Error{DuplicateHeaders, streamId}
+		}
+		if err := binary.Read(r, binary.BigEndian, &length); err != nil {
+			return nil, err
+		}
+		value := make([]byte, length)
+		if _, err := io.ReadFull(r, value); err != nil {
+			return nil, err
+		}
+		valueList := strings.Split(string(value), headerValueSeparator)
+		for _, v := range valueList {
+			h.Add(name, v)
+		}
+	}
+	if e != nil {
+		return h, e
+	}
+	return h, nil
+}
+
+func (f *Framer) readSynStreamFrame(h ControlFrameHeader, frame *SynStreamFrame) error {
+	frame.CFHeader = h
+	var err error
+	if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
+		return err
+	}
+	if err = binary.Read(f.r, binary.BigEndian, &frame.AssociatedToStreamId); err != nil {
+		return err
+	}
+	if err = binary.Read(f.r, binary.BigEndian, &frame.Priority); err != nil {
+		return err
+	}
+	frame.Priority >>= 5
+	if err = binary.Read(f.r, binary.BigEndian, &frame.Slot); err != nil {
+		return err
+	}
+	reader := f.r
+	if !f.headerCompressionDisabled {
+		err := f.uncorkHeaderDecompressor(int64(h.length - 10))
+		if err != nil {
+			return err
+		}
+		reader = f.headerDecompressor
+	}
+	frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId)
+	if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) {
+		err = &Error{WrongCompressedPayloadSize, 0}
+	}
+	if err != nil {
+		return err
+	}
+	for h := range frame.Headers {
+		if invalidReqHeaders[h] {
+			return &Error{InvalidHeaderPresent, frame.StreamId}
+		}
+	}
+	if frame.StreamId == 0 {
+		return &Error{ZeroStreamId, 0}
+	}
+	return nil
+}
+
+func (f *Framer) readSynReplyFrame(h ControlFrameHeader, frame *SynReplyFrame) error {
+	frame.CFHeader = h
+	var err error
+	if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
+		return err
+	}
+	reader := f.r
+	if !f.headerCompressionDisabled {
+		err := f.uncorkHeaderDecompressor(int64(h.length - 4))
+		if err != nil {
+			return err
+		}
+		reader = f.headerDecompressor
+	}
+	frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId)
+	if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) {
+		err = &Error{WrongCompressedPayloadSize, 0}
+	}
+	if err != nil {
+		return err
+	}
+	for h := range frame.Headers {
+		if invalidRespHeaders[h] {
+			return &Error{InvalidHeaderPresent, frame.StreamId}
+		}
+	}
+	if frame.StreamId == 0 {
+		return &Error{ZeroStreamId, 0}
+	}
+	return nil
+}
+
+func (f *Framer) readHeadersFrame(h ControlFrameHeader, frame *HeadersFrame) error {
+	frame.CFHeader = h
+	var err error
+	if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
+		return err
+	}
+	reader := f.r
+	if !f.headerCompressionDisabled {
+		err := f.uncorkHeaderDecompressor(int64(h.length - 4))
+		if err != nil {
+			return err
+		}
+		reader = f.headerDecompressor
+	}
+	frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId)
+	if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) {
+		err = &Error{WrongCompressedPayloadSize, 0}
+	}
+	if err != nil {
+		return err
+	}
+	var invalidHeaders map[string]bool
+	if frame.StreamId%2 == 0 {
+		invalidHeaders = invalidReqHeaders
+	} else {
+		invalidHeaders = invalidRespHeaders
+	}
+	for h := range frame.Headers {
+		if invalidHeaders[h] {
+			return &Error{InvalidHeaderPresent, frame.StreamId}
+		}
+	}
+	if frame.StreamId == 0 {
+		return &Error{ZeroStreamId, 0}
+	}
+	return nil
+}
+
+func (f *Framer) parseDataFrame(streamId StreamId) (*DataFrame, error) {
+	var length uint32
+	if err := binary.Read(f.r, binary.BigEndian, &length); err != nil {
+		return nil, err
+	}
+	var frame DataFrame
+	frame.StreamId = streamId
+	frame.Flags = DataFlags(length >> 24)
+	length &= 0xffffff
+	frame.Data = make([]byte, length)
+	if _, err := io.ReadFull(f.r, frame.Data); err != nil {
+		return nil, err
+	}
+	if frame.StreamId == 0 {
+		return nil, &Error{ZeroStreamId, 0}
+	}
+	return &frame, nil
+}
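ReadFrame returns the Frame interface, so callers typically loop and switch on the concrete frame type. A hedged sketch, in the same package, using only the types defined in this vendored package (the function name is an assumption):

```go
package spdy

import "io"

// readLoop is an illustrative sketch of draining a Framer and dispatching on
// the concrete frame types defined in types.go.
func readLoop(f *Framer) error {
	for {
		frame, err := f.ReadFrame()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		switch t := frame.(type) {
		case *SynStreamFrame:
			_ = t.Headers // the peer opened stream t.StreamId
		case *DataFrame:
			_ = t.Data // payload; t.Flags&DataFlagFin marks the last frame
		case *PingFrame:
			_ = t.Id // echo the ping back
		default:
			// SETTINGS, GOAWAY, HEADERS, WINDOW_UPDATE, ...
		}
	}
}
```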
diff --git a/vendor/github.com/docker/spdystream/spdy/types.go b/vendor/github.com/docker/spdystream/spdy/types.go
new file mode 100644
index 00000000..7b6ee9c6
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/spdy/types.go
@@ -0,0 +1,275 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package spdy implements the SPDY protocol (currently SPDY/3), described in
+// http://www.chromium.org/spdy/spdy-protocol/spdy-protocol-draft3.
+package spdy
+
+import (
+	"bytes"
+	"compress/zlib"
+	"io"
+	"net/http"
+)
+
+// Version is the protocol version number that this package implements.
+const Version = 3
+
+// ControlFrameType stores the type field in a control frame header.
+type ControlFrameType uint16
+
+const (
+	TypeSynStream    ControlFrameType = 0x0001
+	TypeSynReply                      = 0x0002
+	TypeRstStream                     = 0x0003
+	TypeSettings                      = 0x0004
+	TypePing                          = 0x0006
+	TypeGoAway                        = 0x0007
+	TypeHeaders                       = 0x0008
+	TypeWindowUpdate                  = 0x0009
+)
+
+// ControlFlags are the flags that can be set on a control frame.
+type ControlFlags uint8
+
+const (
+	ControlFlagFin                   ControlFlags = 0x01
+	ControlFlagUnidirectional                     = 0x02
+	ControlFlagSettingsClearSettings              = 0x01
+)
+
+// DataFlags are the flags that can be set on a data frame.
+type DataFlags uint8
+
+const (
+	DataFlagFin DataFlags = 0x01
+)
+
+// MaxDataLength is the maximum number of bytes that can be stored in one frame.
+const MaxDataLength = 1<<24 - 1
+
+// headerValueSeparator separates multiple header values.
+const headerValueSeparator = "\x00"
+
+// Frame is a single SPDY frame in its unpacked in-memory representation. Use
+// Framer to read and write it.
+type Frame interface {
+	write(f *Framer) error
+}
+
+// ControlFrameHeader contains all the fields in a control frame header,
+// in its unpacked in-memory representation.
+type ControlFrameHeader struct {
+	// Note, high bit is the "Control" bit.
+	version   uint16 // spdy version number
+	frameType ControlFrameType
+	Flags     ControlFlags
+	length    uint32 // length of data field
+}
+
+type controlFrame interface {
+	Frame
+	read(h ControlFrameHeader, f *Framer) error
+}
+
+// StreamId represents a 31-bit value identifying the stream.
+type StreamId uint32
+
+// SynStreamFrame is the unpacked, in-memory representation of a SYN_STREAM
+// frame.
+type SynStreamFrame struct {
+	CFHeader             ControlFrameHeader
+	StreamId             StreamId
+	AssociatedToStreamId StreamId // stream id for a stream which this stream is associated to
+	Priority             uint8    // priority of this frame (3-bit)
+	Slot                 uint8    // index in the server's credential vector of the client certificate
+	Headers              http.Header
+}
+
+// SynReplyFrame is the unpacked, in-memory representation of a SYN_REPLY frame.
+type SynReplyFrame struct {
+	CFHeader ControlFrameHeader
+	StreamId StreamId
+	Headers  http.Header
+}
+
+// RstStreamStatus represents the status that led to a RST_STREAM.
+type RstStreamStatus uint32
+
+const (
+	ProtocolError RstStreamStatus = iota + 1
+	InvalidStream
+	RefusedStream
+	UnsupportedVersion
+	Cancel
+	InternalError
+	FlowControlError
+	StreamInUse
+	StreamAlreadyClosed
+	InvalidCredentials
+	FrameTooLarge
+)
+
+// RstStreamFrame is the unpacked, in-memory representation of a RST_STREAM
+// frame.
+type RstStreamFrame struct {
+	CFHeader ControlFrameHeader
+	StreamId StreamId
+	Status   RstStreamStatus
+}
+
+// SettingsFlag represents a flag in a SETTINGS frame.
+type SettingsFlag uint8
+
+const (
+	FlagSettingsPersistValue SettingsFlag = 0x1
+	FlagSettingsPersisted                 = 0x2
+)
+
+// SettingsId represents the id of an id/value pair in a SETTINGS frame.
+type SettingsId uint32
+
+const (
+	SettingsUploadBandwidth SettingsId = iota + 1
+	SettingsDownloadBandwidth
+	SettingsRoundTripTime
+	SettingsMaxConcurrentStreams
+	SettingsCurrentCwnd
+	SettingsDownloadRetransRate
+	SettingsInitialWindowSize
+	SettingsClientCretificateVectorSize
+)
+
+// SettingsFlagIdValue is the unpacked, in-memory representation of the
+// combined flag/id/value for a setting in a SETTINGS frame.
+type SettingsFlagIdValue struct {
+	Flag  SettingsFlag
+	Id    SettingsId
+	Value uint32
+}
+
+// SettingsFrame is the unpacked, in-memory representation of a SPDY
+// SETTINGS frame.
+type SettingsFrame struct {
+	CFHeader     ControlFrameHeader
+	FlagIdValues []SettingsFlagIdValue
+}
+
+// PingFrame is the unpacked, in-memory representation of a PING frame.
+type PingFrame struct {
+	CFHeader ControlFrameHeader
+	Id       uint32 // unique id for this ping; server-initiated ids are even, client-initiated ids are odd.
+}
+
+// GoAwayStatus represents the status in a GoAwayFrame.
+type GoAwayStatus uint32
+
+const (
+	GoAwayOK GoAwayStatus = iota
+	GoAwayProtocolError
+	GoAwayInternalError
+)
+
+// GoAwayFrame is the unpacked, in-memory representation of a GOAWAY frame.
+type GoAwayFrame struct {
+	CFHeader         ControlFrameHeader
+	LastGoodStreamId StreamId // last stream id which was accepted by sender
+	Status           GoAwayStatus
+}
+
+// HeadersFrame is the unpacked, in-memory representation of a HEADERS frame.
+type HeadersFrame struct {
+	CFHeader ControlFrameHeader
+	StreamId StreamId
+	Headers  http.Header
+}
+
+// WindowUpdateFrame is the unpacked, in-memory representation of a
+// WINDOW_UPDATE frame.
+type WindowUpdateFrame struct {
+	CFHeader        ControlFrameHeader
+	StreamId        StreamId
+	DeltaWindowSize uint32 // additional number of bytes to existing window size
+}
+
+// TODO: Implement credential frame and related methods.
+
+// DataFrame is the unpacked, in-memory representation of a DATA frame.
+type DataFrame struct {
+	// Note, high bit is the "Control" bit. Should be 0 for data frames.
+	StreamId StreamId
+	Flags    DataFlags
+	Data     []byte // payload data of this frame
+}
+
+// ErrorCode is a SPDY-specific error code.
+type ErrorCode string
+
+const (
+	UnlowercasedHeaderName     ErrorCode = "header was not lowercased"
+	DuplicateHeaders                     = "multiple headers with same name"
+	WrongCompressedPayloadSize           = "compressed payload size was incorrect"
+	UnknownFrameType                     = "unknown frame type"
+	InvalidControlFrame                  = "invalid control frame"
+	InvalidDataFrame                     = "invalid data frame"
+	InvalidHeaderPresent                 = "frame contained invalid header"
+	ZeroStreamId                         = "stream id zero is disallowed"
+)
+
+// Error contains both the type of error and additional values. StreamId is 0
+// if Error is not associated with a stream.
+type Error struct {
+	Err      ErrorCode
+	StreamId StreamId
+}
+
+func (e *Error) Error() string {
+	return string(e.Err)
+}
+
+var invalidReqHeaders = map[string]bool{
+	"Connection":        true,
+	"Host":              true,
+	"Keep-Alive":        true,
+	"Proxy-Connection":  true,
+	"Transfer-Encoding": true,
+}
+
+var invalidRespHeaders = map[string]bool{
+	"Connection":        true,
+	"Keep-Alive":        true,
+	"Proxy-Connection":  true,
+	"Transfer-Encoding": true,
+}
+
+// Framer handles serializing/deserializing SPDY frames, including compressing/
+// decompressing payloads.
+type Framer struct {
+	headerCompressionDisabled bool
+	w                         io.Writer
+	headerBuf                 *bytes.Buffer
+	headerCompressor          *zlib.Writer
+	r                         io.Reader
+	headerReader              io.LimitedReader
+	headerDecompressor        io.ReadCloser
+}
+
+// NewFramer allocates a new Framer for a given SPDY connection, represented by
+// an io.Writer and an io.Reader. Note that Framer will read and write individual fields
+// from/to the Reader and Writer, so the caller should pass in an appropriately
+// buffered implementation to optimize performance.
+func NewFramer(w io.Writer, r io.Reader) (*Framer, error) {
+	compressBuf := new(bytes.Buffer)
+	compressor, err := zlib.NewWriterLevelDict(compressBuf, zlib.BestCompression, []byte(headerDictionary))
+	if err != nil {
+		return nil, err
+	}
+	framer := &Framer{
+		w:                w,
+		headerBuf:        compressBuf,
+		headerCompressor: compressor,
+		r:                r,
+	}
+	return framer, nil
+}
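NewFramer performs many small reads and writes, so its comment asks for buffered I/O. One way to provide it, sketched under the assumption of a plain net.Conn (the helper name is not part of the package):

```go
package spdy

import (
	"bufio"
	"net"
)

// newBufferedFramer is an illustrative helper: wrap the connection once so the
// Framer's many small binary reads/writes do not each hit the socket. The
// caller must Flush the returned *bufio.Writer after writing frames.
func newBufferedFramer(conn net.Conn) (*Framer, *bufio.Writer, error) {
	bw := bufio.NewWriter(conn)
	f, err := NewFramer(bw, bufio.NewReader(conn))
	if err != nil {
		return nil, nil, err
	}
	return f, bw, nil
}
```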
diff --git a/vendor/github.com/docker/spdystream/spdy/write.go b/vendor/github.com/docker/spdystream/spdy/write.go
new file mode 100644
index 00000000..b212f66a
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/spdy/write.go
@@ -0,0 +1,318 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package spdy
+
+import (
+	"encoding/binary"
+	"io"
+	"net/http"
+	"strings"
+)
+
+func (frame *SynStreamFrame) write(f *Framer) error {
+	return f.writeSynStreamFrame(frame)
+}
+
+func (frame *SynReplyFrame) write(f *Framer) error {
+	return f.writeSynReplyFrame(frame)
+}
+
+func (frame *RstStreamFrame) write(f *Framer) (err error) {
+	if frame.StreamId == 0 {
+		return &Error{ZeroStreamId, 0}
+	}
+	frame.CFHeader.version = Version
+	frame.CFHeader.frameType = TypeRstStream
+	frame.CFHeader.Flags = 0
+	frame.CFHeader.length = 8
+
+	// Serialize frame to Writer.
+	if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
+		return
+	}
+	if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
+		return
+	}
+	if frame.Status == 0 {
+		return &Error{InvalidControlFrame, frame.StreamId}
+	}
+	if err = binary.Write(f.w, binary.BigEndian, frame.Status); err != nil {
+		return
+	}
+	return
+}
+
+func (frame *SettingsFrame) write(f *Framer) (err error) {
+	frame.CFHeader.version = Version
+	frame.CFHeader.frameType = TypeSettings
+	frame.CFHeader.length = uint32(len(frame.FlagIdValues)*8 + 4)
+
+	// Serialize frame to Writer.
+	if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
+		return
+	}
+	if err = binary.Write(f.w, binary.BigEndian, uint32(len(frame.FlagIdValues))); err != nil {
+		return
+	}
+	for _, flagIdValue := range frame.FlagIdValues {
+		flagId := uint32(flagIdValue.Flag)<<24 | uint32(flagIdValue.Id)
+		if err = binary.Write(f.w, binary.BigEndian, flagId); err != nil {
+			return
+		}
+		if err = binary.Write(f.w, binary.BigEndian, flagIdValue.Value); err != nil {
+			return
+		}
+	}
+	return
+}
+
+func (frame *PingFrame) write(f *Framer) (err error) {
+	if frame.Id == 0 {
+		return &Error{ZeroStreamId, 0}
+	}
+	frame.CFHeader.version = Version
+	frame.CFHeader.frameType = TypePing
+	frame.CFHeader.Flags = 0
+	frame.CFHeader.length = 4
+
+	// Serialize frame to Writer.
+	if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
+		return
+	}
+	if err = binary.Write(f.w, binary.BigEndian, frame.Id); err != nil {
+		return
+	}
+	return
+}
+
+func (frame *GoAwayFrame) write(f *Framer) (err error) {
+	frame.CFHeader.version = Version
+	frame.CFHeader.frameType = TypeGoAway
+	frame.CFHeader.Flags = 0
+	frame.CFHeader.length = 8
+
+	// Serialize frame to Writer.
+	if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
+		return
+	}
+	if err = binary.Write(f.w, binary.BigEndian, frame.LastGoodStreamId); err != nil {
+		return
+	}
+	if err = binary.Write(f.w, binary.BigEndian, frame.Status); err != nil {
+		return
+	}
+	return nil
+}
+
+func (frame *HeadersFrame) write(f *Framer) error {
+	return f.writeHeadersFrame(frame)
+}
+
+func (frame *WindowUpdateFrame) write(f *Framer) (err error) {
+	frame.CFHeader.version = Version
+	frame.CFHeader.frameType = TypeWindowUpdate
+	frame.CFHeader.Flags = 0
+	frame.CFHeader.length = 8
+
+	// Serialize frame to Writer.
+	if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
+		return
+	}
+	if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
+		return
+	}
+	if err = binary.Write(f.w, binary.BigEndian, frame.DeltaWindowSize); err != nil {
+		return
+	}
+	return nil
+}
+
+func (frame *DataFrame) write(f *Framer) error {
+	return f.writeDataFrame(frame)
+}
+
+// WriteFrame writes a frame.
+func (f *Framer) WriteFrame(frame Frame) error {
+	return frame.write(f)
+}
+
+func writeControlFrameHeader(w io.Writer, h ControlFrameHeader) error {
+	if err := binary.Write(w, binary.BigEndian, 0x8000|h.version); err != nil {
+		return err
+	}
+	if err := binary.Write(w, binary.BigEndian, h.frameType); err != nil {
+		return err
+	}
+	flagsAndLength := uint32(h.Flags)<<24 | h.length
+	if err := binary.Write(w, binary.BigEndian, flagsAndLength); err != nil {
+		return err
+	}
+	return nil
+}
+
+func writeHeaderValueBlock(w io.Writer, h http.Header) (n int, err error) {
+	n = 0
+	if err = binary.Write(w, binary.BigEndian, uint32(len(h))); err != nil {
+		return
+	}
+	n += 4
+	for name, values := range h {
+		if err = binary.Write(w, binary.BigEndian, uint32(len(name))); err != nil {
+			return
+		}
+	n += 4
+		name = strings.ToLower(name)
+		if _, err = io.WriteString(w, name); err != nil {
+			return
+		}
+		n += len(name)
+		v := strings.Join(values, headerValueSeparator)
+		if err = binary.Write(w, binary.BigEndian, uint32(len(v))); err != nil {
+			return
+		}
+	n += 4
+		if _, err = io.WriteString(w, v); err != nil {
+			return
+		}
+		n += len(v)
+	}
+	return
+}
+
+func (f *Framer) writeSynStreamFrame(frame *SynStreamFrame) (err error) {
+	if frame.StreamId == 0 {
+		return &Error{ZeroStreamId, 0}
+	}
+	// Marshal the headers.
+	var writer io.Writer = f.headerBuf
+	if !f.headerCompressionDisabled {
+		writer = f.headerCompressor
+	}
+	if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil {
+		return
+	}
+	if !f.headerCompressionDisabled {
+		f.headerCompressor.Flush()
+	}
+
+	// Set ControlFrameHeader.
+	frame.CFHeader.version = Version
+	frame.CFHeader.frameType = TypeSynStream
+	frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 10)
+
+	// Serialize frame to Writer.
+	if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
+		return err
+	}
+	if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
+		return err
+	}
+	if err = binary.Write(f.w, binary.BigEndian, frame.AssociatedToStreamId); err != nil {
+		return err
+	}
+	if err = binary.Write(f.w, binary.BigEndian, frame.Priority<<5); err != nil {
+		return err
+	}
+	if err = binary.Write(f.w, binary.BigEndian, frame.Slot); err != nil {
+		return err
+	}
+	if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil {
+		return err
+	}
+	f.headerBuf.Reset()
+	return nil
+}
+
+func (f *Framer) writeSynReplyFrame(frame *SynReplyFrame) (err error) {
+	if frame.StreamId == 0 {
+		return &Error{ZeroStreamId, 0}
+	}
+	// Marshal the headers.
+	var writer io.Writer = f.headerBuf
+	if !f.headerCompressionDisabled {
+		writer = f.headerCompressor
+	}
+	if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil {
+		return
+	}
+	if !f.headerCompressionDisabled {
+		f.headerCompressor.Flush()
+	}
+
+	// Set ControlFrameHeader.
+	frame.CFHeader.version = Version
+	frame.CFHeader.frameType = TypeSynReply
+	frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 4)
+
+	// Serialize frame to Writer.
+	if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
+		return
+	}
+	if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
+		return
+	}
+	if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil {
+		return
+	}
+	f.headerBuf.Reset()
+	return
+}
+
+func (f *Framer) writeHeadersFrame(frame *HeadersFrame) (err error) {
+	if frame.StreamId == 0 {
+		return &Error{ZeroStreamId, 0}
+	}
+	// Marshal the headers.
+	var writer io.Writer = f.headerBuf
+	if !f.headerCompressionDisabled {
+		writer = f.headerCompressor
+	}
+	if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil {
+		return
+	}
+	if !f.headerCompressionDisabled {
+		f.headerCompressor.Flush()
+	}
+
+	// Set ControlFrameHeader.
+	frame.CFHeader.version = Version
+	frame.CFHeader.frameType = TypeHeaders
+	frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 4)
+
+	// Serialize frame to Writer.
+	if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
+		return
+	}
+	if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
+		return
+	}
+	if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil {
+		return
+	}
+	f.headerBuf.Reset()
+	return
+}
+
+func (f *Framer) writeDataFrame(frame *DataFrame) (err error) {
+	if frame.StreamId == 0 {
+		return &Error{ZeroStreamId, 0}
+	}
+	if frame.StreamId&0x80000000 != 0 || len(frame.Data) > MaxDataLength {
+		return &Error{InvalidDataFrame, frame.StreamId}
+	}
+
+	// Serialize frame to Writer.
+	if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
+		return
+	}
+	flagsAndLength := uint32(frame.Flags)<<24 | uint32(len(frame.Data))
+	if err = binary.Write(f.w, binary.BigEndian, flagsAndLength); err != nil {
+		return
+	}
+	if _, err = f.w.Write(frame.Data); err != nil {
+		return
+	}
+	return nil
+}
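writeHeaderValueBlock and parseHeaderValueBlock define the layout of a (pre-compression) name/value block: a big-endian uint32 pair count, then a length-prefixed lowercase name and a length-prefixed value per header, with multiple values joined by the NUL headerValueSeparator. The same layout, restated as a standalone sketch (the function name is an assumption):

```go
package spdy

import (
	"bytes"
	"encoding/binary"
	"net/http"
	"strings"
)

// encodeHeaderBlock is illustrative: it mirrors writeHeaderValueBlock but
// returns the raw block instead of streaming it into the (usually zlib)
// writer supplied by the Framer. Writes to a bytes.Buffer cannot fail, so
// binary.Write errors are ignored here.
func encodeHeaderBlock(h http.Header) []byte {
	var buf bytes.Buffer
	binary.Write(&buf, binary.BigEndian, uint32(len(h)))
	for name, values := range h {
		name = strings.ToLower(name)
		binary.Write(&buf, binary.BigEndian, uint32(len(name)))
		buf.WriteString(name)
		v := strings.Join(values, headerValueSeparator)
		binary.Write(&buf, binary.BigEndian, uint32(len(v)))
		buf.WriteString(v)
	}
	return buf.Bytes()
}
```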
diff --git a/vendor/github.com/docker/spdystream/stream.go b/vendor/github.com/docker/spdystream/stream.go
new file mode 100644
index 00000000..f9e9ee26
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/stream.go
@@ -0,0 +1,327 @@
+package spdystream
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"sync"
+	"time"
+
+	"github.com/docker/spdystream/spdy"
+)
+
+var (
+	ErrUnreadPartialData = errors.New("unread partial data")
+)
+
+type Stream struct {
+	streamId  spdy.StreamId
+	parent    *Stream
+	conn      *Connection
+	startChan chan error
+
+	dataLock sync.RWMutex
+	dataChan chan []byte
+	unread   []byte
+
+	priority   uint8
+	headers    http.Header
+	headerChan chan http.Header
+	finishLock sync.Mutex
+	finished   bool
+	replyCond  *sync.Cond
+	replied    bool
+	closeLock  sync.Mutex
+	closeChan  chan bool
+}
+
+// WriteData writes data to the stream, sending one data frame per call.
+func (s *Stream) WriteData(data []byte, fin bool) error {
+	s.waitWriteReply()
+	var flags spdy.DataFlags
+
+	if fin {
+		flags = spdy.DataFlagFin
+		s.finishLock.Lock()
+		if s.finished {
+			s.finishLock.Unlock()
+			return ErrWriteClosedStream
+		}
+		s.finished = true
+		s.finishLock.Unlock()
+	}
+
+	dataFrame := &spdy.DataFrame{
+		StreamId: s.streamId,
+		Flags:    flags,
+		Data:     data,
+	}
+
+	debugMessage("(%p) (%d) Writing data frame", s, s.streamId)
+	return s.conn.framer.WriteFrame(dataFrame)
+}
+
+// Write writes bytes to the stream by sending them in a single data frame via WriteData.
+func (s *Stream) Write(data []byte) (n int, err error) {
+	err = s.WriteData(data, false)
+	if err == nil {
+		n = len(data)
+	}
+	return
+}
+
+// Read reads bytes from the stream. A single read will never return more
+// than what was sent in a single data frame, but multiple calls to Read
+// may consume data from the same data frame.
+func (s *Stream) Read(p []byte) (n int, err error) {
+	if s.unread == nil {
+		select {
+		case <-s.closeChan:
+			return 0, io.EOF
+		case read, ok := <-s.dataChan:
+			if !ok {
+				return 0, io.EOF
+			}
+			s.unread = read
+		}
+	}
+	n = copy(p, s.unread)
+	if n < len(s.unread) {
+		s.unread = s.unread[n:]
+	} else {
+		s.unread = nil
+	}
+	return
+}
+
+// ReadData reads an entire data frame and returns the byte array
+// from the data frame.  If there is unread data from the result
+// of a Read call, this function will return ErrUnreadPartialData.
+func (s *Stream) ReadData() ([]byte, error) {
+	debugMessage("(%p) Reading data from %d", s, s.streamId)
+	if s.unread != nil {
+		return nil, ErrUnreadPartialData
+	}
+	select {
+	case <-s.closeChan:
+		return nil, io.EOF
+	case read, ok := <-s.dataChan:
+		if !ok {
+			return nil, io.EOF
+		}
+		return read, nil
+	}
+}
+
+func (s *Stream) waitWriteReply() {
+	if s.replyCond != nil {
+		s.replyCond.L.Lock()
+		for !s.replied {
+			s.replyCond.Wait()
+		}
+		s.replyCond.L.Unlock()
+	}
+}
+
+// Wait waits for the stream to receive a reply.
+func (s *Stream) Wait() error {
+	return s.WaitTimeout(time.Duration(0))
+}
+
+// WaitTimeout waits for the stream to receive a reply or for timeout.
+// When the timeout is reached, ErrTimeout will be returned.
+func (s *Stream) WaitTimeout(timeout time.Duration) error {
+	var timeoutChan <-chan time.Time
+	if timeout > time.Duration(0) {
+		timeoutChan = time.After(timeout)
+	}
+
+	select {
+	case err := <-s.startChan:
+		if err != nil {
+			return err
+		}
+		break
+	case <-timeoutChan:
+		return ErrTimeout
+	}
+	return nil
+}
+
+// Close closes the stream by sending an empty data frame with the
+// finish flag set, indicating this side is finished with the stream.
+func (s *Stream) Close() error {
+	select {
+	case <-s.closeChan:
+		// Stream is now fully closed
+		s.conn.removeStream(s)
+	default:
+		break
+	}
+	return s.WriteData([]byte{}, true)
+}
+
+// Reset sends a reset frame, putting the stream into the fully closed state.
+func (s *Stream) Reset() error {
+	s.conn.removeStream(s)
+	return s.resetStream()
+}
+
+func (s *Stream) resetStream() error {
+	// Always call closeRemoteChannels, even if s.finished is already true.
+	// This makes it so that stream.Close() followed by stream.Reset() allows
+	// stream.Read() to unblock.
+	s.closeRemoteChannels()
+
+	s.finishLock.Lock()
+	if s.finished {
+		s.finishLock.Unlock()
+		return nil
+	}
+	s.finished = true
+	s.finishLock.Unlock()
+
+	resetFrame := &spdy.RstStreamFrame{
+		StreamId: s.streamId,
+		Status:   spdy.Cancel,
+	}
+	return s.conn.framer.WriteFrame(resetFrame)
+}
+
+// CreateSubStream creates a stream using the current stream as the parent.
+func (s *Stream) CreateSubStream(headers http.Header, fin bool) (*Stream, error) {
+	return s.conn.CreateStream(headers, s, fin)
+}
+
+// SetPriority sets the stream priority; it does not affect the
+// remote priority of this stream after Open has been called.
+// Valid values are 0 through 7, with 0 being the highest priority
+// and 7 the lowest.
+func (s *Stream) SetPriority(priority uint8) {
+	s.priority = priority
+}
+
+// SendHeader sends a header frame across the stream
+func (s *Stream) SendHeader(headers http.Header, fin bool) error {
+	return s.conn.sendHeaders(headers, s, fin)
+}
+
+// SendReply sends a reply on a stream; it is only valid to call it once,
+// when handling a new stream.
+func (s *Stream) SendReply(headers http.Header, fin bool) error {
+	if s.replyCond == nil {
+		return errors.New("cannot reply on initiated stream")
+	}
+	s.replyCond.L.Lock()
+	defer s.replyCond.L.Unlock()
+	if s.replied {
+		return nil
+	}
+
+	err := s.conn.sendReply(headers, s, fin)
+	if err != nil {
+		return err
+	}
+
+	s.replied = true
+	s.replyCond.Broadcast()
+	return nil
+}
+
+// Refuse sends a reset frame with the status RefusedStream; it is only
+// valid to call it once, when handling a new stream. This may be used to
+// indicate that a stream is not allowed when HTTP status codes are not
+// being used.
+func (s *Stream) Refuse() error {
+	if s.replied {
+		return nil
+	}
+	s.replied = true
+	return s.conn.sendReset(spdy.RefusedStream, s)
+}
+
+// Cancel sends a reset frame with the status canceled. This
+// can be used at any time by the creator of the Stream to
+// indicate the stream is no longer needed.
+func (s *Stream) Cancel() error {
+	return s.conn.sendReset(spdy.Cancel, s)
+}
+
+// ReceiveHeader receives a header sent on the other side
+// of the stream. This function will block until a header
+// is received or the stream is closed.
+func (s *Stream) ReceiveHeader() (http.Header, error) {
+	select {
+	case <-s.closeChan:
+		break
+	case header, ok := <-s.headerChan:
+		if !ok {
+			return nil, fmt.Errorf("header chan closed")
+		}
+		return header, nil
+	}
+	return nil, fmt.Errorf("stream closed")
+}
+
+// Parent returns the parent stream
+func (s *Stream) Parent() *Stream {
+	return s.parent
+}
+
+// Headers returns the headers used to create the stream
+func (s *Stream) Headers() http.Header {
+	return s.headers
+}
+
+// String returns the string version of stream using the
+// streamId to uniquely identify the stream
+func (s *Stream) String() string {
+	return fmt.Sprintf("stream:%d", s.streamId)
+}
+
+// Identifier returns a 32 bit identifier for the stream
+func (s *Stream) Identifier() uint32 {
+	return uint32(s.streamId)
+}
+
+// IsFinished returns whether the stream has finished
+// sending data
+func (s *Stream) IsFinished() bool {
+	return s.finished
+}
+
+// Implement net.Conn interface
+
+func (s *Stream) LocalAddr() net.Addr {
+	return s.conn.conn.LocalAddr()
+}
+
+func (s *Stream) RemoteAddr() net.Addr {
+	return s.conn.conn.RemoteAddr()
+}
+
+// TODO set per stream values instead of connection-wide
+
+func (s *Stream) SetDeadline(t time.Time) error {
+	return s.conn.conn.SetDeadline(t)
+}
+
+func (s *Stream) SetReadDeadline(t time.Time) error {
+	return s.conn.conn.SetReadDeadline(t)
+}
+
+func (s *Stream) SetWriteDeadline(t time.Time) error {
+	return s.conn.conn.SetWriteDeadline(t)
+}
+
+func (s *Stream) closeRemoteChannels() {
+	s.closeLock.Lock()
+	defer s.closeLock.Unlock()
+	select {
+	case <-s.closeChan:
+	default:
+		close(s.closeChan)
+	}
+}
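Putting the Stream methods above together, a receiving side typically replies once and then reads whole data frames until EOF. A sketch under the assumption that the *Stream has already been handed over by the connection's stream handler (defined in connection.go, outside this file):

```go
package spdystream

import "io"

// echoStream is illustrative only: acknowledge a newly received stream, echo
// every data frame back, and close once the peer signals EOF.
func echoStream(s *Stream) error {
	if err := s.SendReply(s.Headers(), false); err != nil {
		return err
	}
	for {
		data, err := s.ReadData()
		if err == io.EOF {
			return s.Close() // sends an empty DATA frame with the FIN flag
		}
		if err != nil {
			return err
		}
		if err := s.WriteData(data, false); err != nil {
			return err
		}
	}
}
```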
diff --git a/vendor/github.com/docker/spdystream/utils.go b/vendor/github.com/docker/spdystream/utils.go
new file mode 100644
index 00000000..1b2c199a
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/utils.go
@@ -0,0 +1,16 @@
+package spdystream
+
+import (
+	"log"
+	"os"
+)
+
+var (
+	DEBUG = os.Getenv("DEBUG")
+)
+
+func debugMessage(fmt string, args ...interface{}) {
+	if DEBUG != "" {
+		log.Printf(fmt, args...)
+	}
+}
diff --git a/vendor/github.com/emicklei/go-restful/.gitignore b/vendor/github.com/emicklei/go-restful/.gitignore
new file mode 100644
index 00000000..cece7be6
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/.gitignore
@@ -0,0 +1,70 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+restful.html
+
+*.out
+
+tmp.prof
+
+go-restful.test
+
+examples/restful-basic-authentication
+
+examples/restful-encoding-filter
+
+examples/restful-filters
+
+examples/restful-hello-world
+
+examples/restful-resource-functions
+
+examples/restful-serve-static
+
+examples/restful-user-service
+
+*.DS_Store
+examples/restful-user-resource
+
+examples/restful-multi-containers
+
+examples/restful-form-handling
+
+examples/restful-CORS-filter
+
+examples/restful-options-filter
+
+examples/restful-curly-router
+
+examples/restful-cpuprofiler-service
+
+examples/restful-pre-post-filters
+
+curly.prof
+
+examples/restful-NCSA-logging
+
+examples/restful-html-template
+
+s.html
+restful-path-tail
diff --git a/vendor/github.com/emicklei/go-restful/.travis.yml b/vendor/github.com/emicklei/go-restful/.travis.yml
new file mode 100644
index 00000000..b22f8f54
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/.travis.yml
@@ -0,0 +1,6 @@
+language: go
+
+go:
+  - 1.x
+
+script: go test -v
\ No newline at end of file
diff --git a/vendor/github.com/emicklei/go-restful/CHANGES.md b/vendor/github.com/emicklei/go-restful/CHANGES.md
new file mode 100644
index 00000000..e5252963
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/CHANGES.md
@@ -0,0 +1,273 @@
+## Change history of go-restful
+
+
+v2.9.5
+- fix panic in Response.WriteError if err == nil
+
+v2.9.4
+
+- fix issue #400 , parsing mime type quality
+- Route Builder added option for contentEncodingEnabled (#398)
+
+v2.9.3
+
+- Avoid return of 415 Unsupported Media Type when request body is empty (#396)
+
+v2.9.2
+
+- Reduce allocations in per-request methods to improve performance (#395)
+
+v2.9.1
+
+- Fix issue with default responses and invalid status code 0. (#393)
+
+v2.9.0
+
+- add per Route content encoding setting (overrides container setting)
+
+v2.8.0
+
+- add Request.QueryParameters()
+- add json-iterator (via build tag)
+- disable vgo module (until log is moved)
+
+v2.7.1
+
+- add vgo module
+
+v2.6.1
+
+- add JSONNewDecoderFunc to allow custom JSON Decoder usage (go 1.10+)
+
+v2.6.0
+
+- Make JSR 311 routing and path param processing consistent
+- Adding description to RouteBuilder.Reads()
+- Update example for Swagger12 and OpenAPI
+
+2017-09-13
+
+- added route condition functions using `.If(func)` in route building.
+
+2017-02-16
+
+- solved issue #304, make operation names unique
+
+2017-01-30
+ 
+	[IMPORTANT] For swagger users, change your import statement to:	
+	swagger "github.com/emicklei/go-restful-swagger12"
+
+- moved swagger 1.2 code to go-restful-swagger12
+- created TAG 2.0.0
+
+2017-01-27
+
+- remove defer request body close
+- expose Dispatch for testing filters and Routefunctions
+- swagger response model cannot be array 
+- created TAG 1.0.0
+
+2016-12-22
+
+- (API change) Remove code related to caching request content. Removes SetCacheReadEntity(doCache bool)
+
+2016-11-26
+
+- Default change! now use CurlyRouter (was RouterJSR311)
+- Default change! no more caching of request content
+- Default change! do not recover from panics
+
+2016-09-22
+
+- fix the DefaultRequestContentType feature
+
+2016-02-14
+
+- take the quality factor of the Accept header media type into account when deciding the content type of the response
+- add constructors for custom entity accessors for xml and json 
+
+2015-09-27
+
+- rename new WriteStatusAnd... to WriteHeaderAnd... for consistency
+
+2015-09-25
+
+- fixed problem with changing Header after WriteHeader (issue 235)
+
+2015-09-14
+
+- changed behavior of WriteHeader (immediate write) and WriteEntity (no status write)
+- added support for custom EntityReaderWriters.
+
+2015-08-06
+
+- add support for reading entities from compressed request content
+- use sync.Pool for compressors of http response and request body
+- add Description to Parameter for documentation in Swagger UI
+
+2015-03-20
+
+- add configurable logging
+
+2015-03-18
+
+- if not specified, the Operation is derived from the Route function
+
+2015-03-17
+
+- expose Parameter creation functions
+- make trace logger an interface
+- fix OPTIONSFilter
+- customize rendering of ServiceError
+- JSR311 router now handles wildcards
+- add Notes to Route
+
+2014-11-27
+
+- (api add) PrettyPrint per response. (as proposed in #167)
+
+2014-11-12
+
+- (api add) ApiVersion(.) for documentation in Swagger UI
+
+2014-11-10
+
+- (api change) struct fields tagged with "description" show up in Swagger UI
+
+2014-10-31
+
+- (api change) ReturnsError -> Returns
+- (api add)    RouteBuilder.Do(aBuilder) for DRY use of RouteBuilder
+- fix swagger nested structs
+- sort Swagger response messages by code
+
+2014-10-23
+
+- (api add) ReturnsError allows you to document Http codes in swagger
+- fixed problem with greedy CurlyRouter
+- (api add) Access-Control-Max-Age in CORS
+- add tracing functionality (injectable) for debugging purposes
+- support JSON parse 64bit int 
+- fix empty parameters for swagger
+- WebServicesUrl is now optional for swagger
+- fixed duplicate AccessControlAllowOrigin in CORS
+- (api change) expose ServeMux in container
+- (api add) added AllowedDomains in CORS
+- (api add) ParameterNamed for detailed documentation
+
+2014-04-16
+
+- (api add) expose constructor of Request for testing.
+
+2014-06-27
+
+- (api add) ParameterNamed gives access to a Parameter definition and its data (for further specification).
+- (api add) SetCacheReadEntity allows control over whether or not the request body is being cached (default true for compatibility reasons).
+
+2014-07-03
+
+- (api add) CORS can be configured with a list of allowed domains
+
+2014-03-12
+
+- (api add) Route path parameters can use wildcard or regular expressions. (requires CurlyRouter)
+
+2014-02-26
+
+- (api add) Request now provides information about the matched Route, see method SelectedRoutePath 
+
+2014-02-17
+
+- (api change) renamed parameter constants (go-lint checks)
+
+2014-01-10
+
+- (api add) support for CloseNotify, see http://golang.org/pkg/net/http/#CloseNotifier
+
+2014-01-07
+
+- (api change) Write* methods in Response now return the error or nil.
+- added example of serving HTML from a Go template.
+- fixed comparing Allowed headers in CORS (is now case-insensitive)
+
+2013-11-13
+
+- (api add) Response knows how many bytes are written to the response body.
+
+2013-10-29
+
+- (api add) RecoverHandler(handler RecoverHandleFunction) to change how panic recovery is handled. Default behavior is to log and return a stack trace. This may be a security issue as it exposes source code information.
+
+2013-10-04
+
+- (api add) Response knows what HTTP status has been written
+- (api add) Request can have attributes (map of string->interface, also called request-scoped variables)
+
+2013-09-12
+
+- (api change) Router interface simplified
+- Implemented CurlyRouter, a Router that does not use|allow regular expressions in paths
+
+2013-08-05
+- add OPTIONS support
+- add CORS support
+
+2013-08-27
+
+- fixed some reported issues (see github)
+- (api change) deprecated use of WriteError; use WriteErrorString instead
+
+2014-04-15
+
+- (fix) v1.0.1 tag: fix Issue 111: WriteErrorString
+
+2013-08-08
+
+- (api add) Added implementation Container: a WebServices collection with its own http.ServeMux allowing multiple endpoints per program. Existing uses of go-restful will register their services to the DefaultContainer.
+- (api add) the swagger package has be extended to have a UI per container.
+- if panic is detected then a small stack trace is printed (thanks to runner-mei)
+- (api add) WriteErrorString to Response
+
+Important API changes:
+
+- (api remove) package variable DoNotRecover no longer works ; use restful.DefaultContainer.DoNotRecover(true) instead.
+- (api remove) package variable EnableContentEncoding no longer works ; use restful.DefaultContainer.EnableContentEncoding(true) instead.
+ 
+ 
+2013-07-06
+
+- (api add) Added support for response encoding (gzip and deflate(zlib)). This feature is disabled on default (for backwards compatibility). Use restful.EnableContentEncoding = true in your initialization to enable this feature.
+
+2013-06-19
+
+- (improve) DoNotRecover option, moved request body closer, improved ReadEntity
+
+2013-06-03
+
+- (api change) removed Dispatcher interface, hide PathExpression
+- changed receiver names of type functions to be more idiomatic Go
+
+2013-06-02
+
+- (optimize) Cache the RegExp compilation of Paths.
+
+2013-05-22
+	
+- (api add) Added support for request/response filter functions
+
+2013-05-18
+
+
+- (api add) Added feature to change the default Http Request Dispatch function (travis cline)
+- (api change) Moved Swagger Webservice to swagger package (see example restful-user)
+
+[2012-11-14 .. 2013-05-18>
+ 
+- See https://github.com/emicklei/go-restful/commits
+
+2012-11-14
+
+- Initial commit
+
+
diff --git a/vendor/github.com/emicklei/go-restful/LICENSE b/vendor/github.com/emicklei/go-restful/LICENSE
new file mode 100644
index 00000000..ece7ec61
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/LICENSE
@@ -0,0 +1,22 @@
+Copyright (c) 2012,2013 Ernest Micklei
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/emicklei/go-restful/Makefile b/vendor/github.com/emicklei/go-restful/Makefile
new file mode 100644
index 00000000..b40081cc
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/Makefile
@@ -0,0 +1,7 @@
+all: test
+
+test:
+	go test -v .
+
+ex:
+	cd examples && ls *.go | xargs go build -o /tmp/ignore
\ No newline at end of file
diff --git a/vendor/github.com/emicklei/go-restful/README.md b/vendor/github.com/emicklei/go-restful/README.md
new file mode 100644
index 00000000..f52c25ac
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/README.md
@@ -0,0 +1,88 @@
+go-restful
+==========
+package for building REST-style Web Services using Google Go
+
+[![Build Status](https://travis-ci.org/emicklei/go-restful.png)](https://travis-ci.org/emicklei/go-restful)
+[![Go Report Card](https://goreportcard.com/badge/github.com/emicklei/go-restful)](https://goreportcard.com/report/github.com/emicklei/go-restful)
+[![GoDoc](https://godoc.org/github.com/emicklei/go-restful?status.svg)](https://godoc.org/github.com/emicklei/go-restful)
+
+- [Code examples](https://github.com/emicklei/go-restful/tree/master/examples)
+
+REST asks developers to use HTTP methods explicitly and in a way that's consistent with the protocol definition. This basic REST design principle establishes a one-to-one mapping between create, read, update, and delete (CRUD) operations and HTTP methods. According to this mapping:
+
+- GET = Retrieve a representation of a resource
+- POST = Create if you are sending content to the server to create a subordinate of the specified resource collection, using some server-side algorithm.
+- PUT = Create if you are sending the full content of the specified resource (URI).
+- PUT = Update if you are updating the full content of the specified resource.
+- DELETE = Delete if you are requesting the server to delete the resource
+- PATCH = Update partial content of a resource
+- OPTIONS = Get information about the communication options for the request URI
+    
+### Example
+
+```Go
+ws := new(restful.WebService)
+ws.
+	Path("/users").
+	Consumes(restful.MIME_XML, restful.MIME_JSON).
+	Produces(restful.MIME_JSON, restful.MIME_XML)
+
+ws.Route(ws.GET("/{user-id}").To(u.findUser).
+	Doc("get a user").
+	Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")).
+	Writes(User{}))		
+...
+	
+func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
+	id := request.PathParameter("user-id")
+	...
+}
+```
+	
+[Full API of a UserResource](https://github.com/emicklei/go-restful/tree/master/examples/restful-user-resource.go) 
+		
+### Features
+
+- Routes for request &#8594; function mapping with path parameter (e.g. {id}) support
+- Configurable router:
+	- (default) Fast routing algorithm that allows static elements, regular expressions and dynamic parameters in the URL path (e.g. /meetings/{id} or /static/{subpath:*})
+	- Routing algorithm after [JSR311](http://jsr311.java.net/nonav/releases/1.1/spec/spec.html) that is implemented using (but does **not** accept) regular expressions
+- Request API for reading structs from JSON/XML and accessing parameters (path, query, header)
+- Response API for writing structs to JSON/XML and setting headers
+- Customizable encoding using EntityReaderWriter registration
+- Filters for intercepting the request &#8594; response flow on Service or Route level
+- Request-scoped variables using attributes
+- Containers for WebServices on different HTTP endpoints
+- Content encoding (gzip,deflate) of request and response payloads
+- Automatic responses on OPTIONS (using a filter)
+- Automatic CORS request handling (using a filter)
+- API declaration for Swagger UI ([go-restful-openapi](https://github.com/emicklei/go-restful-openapi), see [go-restful-swagger12](https://github.com/emicklei/go-restful-swagger12))
+- Panic recovery to produce HTTP 500, customizable using RecoverHandler(...)
+- Route errors produce HTTP 404/405/406/415 errors, customizable using ServiceErrorHandler(...)
+- Configurable (trace) logging
+- Customizable gzip/deflate readers and writers using CompressorProvider registration
+
+## How to customize
+There are several hooks to customize the behavior of the go-restful package.
+
+- Router algorithm
+- Panic recovery
+- JSON decoder
+- Trace logging
+- Compression
+- Encoders for other serializers
+- Use [jsoniter](https://github.com/json-iterator/go) by building this package with a tag, e.g. `go build -tags=jsoniter .`
+
+TODO: write examples of these.
+
+## Resources
+
+- [Example posted on blog](http://ernestmicklei.com/2012/11/go-restful-first-working-example/)
+- [Design explained on blog](http://ernestmicklei.com/2012/11/go-restful-api-design/)
+- [sourcegraph](https://sourcegraph.com/github.com/emicklei/go-restful)
+- [showcase: Zazkia - tcp proxy for testing resiliency](https://github.com/emicklei/zazkia)
+- [showcase: Mora - MongoDB REST Api server](https://github.com/emicklei/mora)
+
+Type ```git shortlog -s``` for a full list of contributors.
+
+© 2012 - 2018, http://ernestmicklei.com. MIT License. Contributions are welcome.
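
For the hooks listed under "How to customize", a minimal sketch of a few of them follows; the values, addresses and handler bodies are illustrative only, and the jsoniter decoder is selected with the build tag shown above:

```Go
package main

import (
	"log"
	"net/http"
	"os"

	restful "github.com/emicklei/go-restful"
)

func main() {
	// Panic recovery: recover panics into HTTP 500 and log them ourselves.
	restful.DefaultContainer.DoNotRecover(false)
	restful.DefaultContainer.RecoverHandler(func(reason interface{}, w http.ResponseWriter) {
		log.Printf("recovered from panic: %v", reason)
		w.WriteHeader(http.StatusInternalServerError)
	})

	// Compression: replace the default sync.Pool strategy with a bounded cache.
	restful.SetCompressorProvider(restful.NewBoundedCachedCompressors(20, 20))

	// Trace logging of the route matching and filter flow.
	restful.TraceLogger(log.New(os.Stdout, "[restful] ", log.LstdFlags|log.Lshortfile))
}
```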
diff --git a/vendor/github.com/emicklei/go-restful/Srcfile b/vendor/github.com/emicklei/go-restful/Srcfile
new file mode 100644
index 00000000..16fd1868
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/Srcfile
@@ -0,0 +1 @@
+{"SkipDirs": ["examples"]}
diff --git a/vendor/github.com/emicklei/go-restful/bench_test.sh b/vendor/github.com/emicklei/go-restful/bench_test.sh
new file mode 100644
index 00000000..47ffbe4a
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/bench_test.sh
@@ -0,0 +1,10 @@
+#go test -run=none -file bench_test.go -test.bench . -cpuprofile=bench_test.out
+
+go test -c
+./go-restful.test -test.run=none -test.cpuprofile=tmp.prof -test.bench=BenchmarkMany
+./go-restful.test -test.run=none -test.cpuprofile=curly.prof -test.bench=BenchmarkManyCurly
+
+#go tool pprof go-restful.test tmp.prof
+go tool pprof go-restful.test curly.prof
+
+
diff --git a/vendor/github.com/emicklei/go-restful/compress.go b/vendor/github.com/emicklei/go-restful/compress.go
new file mode 100644
index 00000000..220b3771
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/compress.go
@@ -0,0 +1,123 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+	"bufio"
+	"compress/gzip"
+	"compress/zlib"
+	"errors"
+	"io"
+	"net"
+	"net/http"
+	"strings"
+)
+
+// OBSOLETE : use restful.DefaultContainer.EnableContentEncoding(true) to change this setting.
+var EnableContentEncoding = false
+
+// CompressingResponseWriter is a http.ResponseWriter that can perform content encoding (gzip and zlib)
+type CompressingResponseWriter struct {
+	writer     http.ResponseWriter
+	compressor io.WriteCloser
+	encoding   string
+}
+
+// Header is part of http.ResponseWriter interface
+func (c *CompressingResponseWriter) Header() http.Header {
+	return c.writer.Header()
+}
+
+// WriteHeader is part of http.ResponseWriter interface
+func (c *CompressingResponseWriter) WriteHeader(status int) {
+	c.writer.WriteHeader(status)
+}
+
+// Write is part of http.ResponseWriter interface
+// It is passed through the compressor
+func (c *CompressingResponseWriter) Write(bytes []byte) (int, error) {
+	if c.isCompressorClosed() {
+		return -1, errors.New("Compressing error: tried to write data using closed compressor")
+	}
+	return c.compressor.Write(bytes)
+}
+
+// CloseNotify is part of http.CloseNotifier interface
+func (c *CompressingResponseWriter) CloseNotify() <-chan bool {
+	return c.writer.(http.CloseNotifier).CloseNotify()
+}
+
+// Close the underlying compressor
+func (c *CompressingResponseWriter) Close() error {
+	if c.isCompressorClosed() {
+		return errors.New("Compressing error: tried to close already closed compressor")
+	}
+
+	c.compressor.Close()
+	if ENCODING_GZIP == c.encoding {
+		currentCompressorProvider.ReleaseGzipWriter(c.compressor.(*gzip.Writer))
+	}
+	if ENCODING_DEFLATE == c.encoding {
+		currentCompressorProvider.ReleaseZlibWriter(c.compressor.(*zlib.Writer))
+	}
+	// gc hint needed?
+	c.compressor = nil
+	return nil
+}
+
+func (c *CompressingResponseWriter) isCompressorClosed() bool {
+	return nil == c.compressor
+}
+
+// Hijack implements the http.Hijacker interface.
+// This is especially useful when combining Container.EnableContentEncoding
+// with websockets (for instance gorilla/websocket).
+func (c *CompressingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+	hijacker, ok := c.writer.(http.Hijacker)
+	if !ok {
+		return nil, nil, errors.New("ResponseWriter doesn't support Hijacker interface")
+	}
+	return hijacker.Hijack()
+}
+
+// wantsCompressedResponse reads the Accept-Encoding header to see if and which encoding is requested.
+func wantsCompressedResponse(httpRequest *http.Request) (bool, string) {
+	header := httpRequest.Header.Get(HEADER_AcceptEncoding)
+	gi := strings.Index(header, ENCODING_GZIP)
+	zi := strings.Index(header, ENCODING_DEFLATE)
+	// use in order of appearance
+	if gi == -1 {
+		return zi != -1, ENCODING_DEFLATE
+	} else if zi == -1 {
+		return gi != -1, ENCODING_GZIP
+	} else {
+		if gi < zi {
+			return true, ENCODING_GZIP
+		}
+		return true, ENCODING_DEFLATE
+	}
+}
+
+// NewCompressingResponseWriter creates a CompressingResponseWriter for a known encoding = {gzip,deflate}
+func NewCompressingResponseWriter(httpWriter http.ResponseWriter, encoding string) (*CompressingResponseWriter, error) {
+	httpWriter.Header().Set(HEADER_ContentEncoding, encoding)
+	c := new(CompressingResponseWriter)
+	c.writer = httpWriter
+	var err error
+	if ENCODING_GZIP == encoding {
+		w := currentCompressorProvider.AcquireGzipWriter()
+		w.Reset(httpWriter)
+		c.compressor = w
+		c.encoding = ENCODING_GZIP
+	} else if ENCODING_DEFLATE == encoding {
+		w := currentCompressorProvider.AcquireZlibWriter()
+		w.Reset(httpWriter)
+		c.compressor = w
+		c.encoding = ENCODING_DEFLATE
+	} else {
+		return nil, errors.New("Unknown encoding:" + encoding)
+	}
+	return c, err
+}
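
A small usage sketch of the writer above from outside the package (the middleware name and route are illustrative); inside this package the Container installs a CompressingResponseWriter itself, as container.go shows later in this patch:

```Go
package main

import (
	"net/http"
	"strings"

	restful "github.com/emicklei/go-restful"
)

// gzipped wraps a plain net/http handler and compresses the response
// when the client sends an Accept-Encoding header containing "gzip".
func gzipped(next http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		if strings.Contains(r.Header.Get(restful.HEADER_AcceptEncoding), restful.ENCODING_GZIP) {
			if cw, err := restful.NewCompressingResponseWriter(w, restful.ENCODING_GZIP); err == nil {
				defer cw.Close() // flushes and releases the pooled gzip.Writer
				w = cw
			}
		}
		next(w, r)
	}
}

func main() {
	http.HandleFunc("/hello", gzipped(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello"))
	}))
	http.ListenAndServe(":8080", nil)
}
```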
diff --git a/vendor/github.com/emicklei/go-restful/compressor_cache.go b/vendor/github.com/emicklei/go-restful/compressor_cache.go
new file mode 100644
index 00000000..ee426010
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/compressor_cache.go
@@ -0,0 +1,103 @@
+package restful
+
+// Copyright 2015 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+	"compress/gzip"
+	"compress/zlib"
+)
+
+// BoundedCachedCompressors is a CompressorProvider that uses a cache with a fixed amount
+// of writers and readers (resources).
+// If a new resource is acquired and all are in use, it will return a new unmanaged resource.
+type BoundedCachedCompressors struct {
+	gzipWriters     chan *gzip.Writer
+	gzipReaders     chan *gzip.Reader
+	zlibWriters     chan *zlib.Writer
+	writersCapacity int
+	readersCapacity int
+}
+
+// NewBoundedCachedCompressors returns a new BoundedCachedCompressors with a pre-filled cache.
+func NewBoundedCachedCompressors(writersCapacity, readersCapacity int) *BoundedCachedCompressors {
+	b := &BoundedCachedCompressors{
+		gzipWriters:     make(chan *gzip.Writer, writersCapacity),
+		gzipReaders:     make(chan *gzip.Reader, readersCapacity),
+		zlibWriters:     make(chan *zlib.Writer, writersCapacity),
+		writersCapacity: writersCapacity,
+		readersCapacity: readersCapacity,
+	}
+	for ix := 0; ix < writersCapacity; ix++ {
+		b.gzipWriters <- newGzipWriter()
+		b.zlibWriters <- newZlibWriter()
+	}
+	for ix := 0; ix < readersCapacity; ix++ {
+		b.gzipReaders <- newGzipReader()
+	}
+	return b
+}
+
+// AcquireGzipWriter returns a resettable *gzip.Writer. Needs to be released.
+func (b *BoundedCachedCompressors) AcquireGzipWriter() *gzip.Writer {
+	var writer *gzip.Writer
+	select {
+	case writer, _ = <-b.gzipWriters:
+	default:
+		// return a new unmanaged one
+		writer = newGzipWriter()
+	}
+	return writer
+}
+
+// ReleaseGzipWriter accepts a writer (does not have to be one that was cached)
+// only when the cache has room for it. It will ignore it otherwise.
+func (b *BoundedCachedCompressors) ReleaseGzipWriter(w *gzip.Writer) {
+	// forget the unmanaged ones
+	if len(b.gzipWriters) < b.writersCapacity {
+		b.gzipWriters <- w
+	}
+}
+
+// AcquireGzipReader returns a *gzip.Reader. Needs to be released.
+func (b *BoundedCachedCompressors) AcquireGzipReader() *gzip.Reader {
+	var reader *gzip.Reader
+	select {
+	case reader, _ = <-b.gzipReaders:
+	default:
+		// return a new unmanaged one
+		reader = newGzipReader()
+	}
+	return reader
+}
+
+// ReleaseGzipReader accepts a reader (does not have to be one that was cached)
+// only when the cache has room for it. It will ignore it otherwise.
+func (b *BoundedCachedCompressors) ReleaseGzipReader(r *gzip.Reader) {
+	// forget the unmanaged ones
+	if len(b.gzipReaders) < b.readersCapacity {
+		b.gzipReaders <- r
+	}
+}
+
+// AcquireZlibWriter returns a resettable *zlib.Writer. Needs to be released.
+func (b *BoundedCachedCompressors) AcquireZlibWriter() *zlib.Writer {
+	var writer *zlib.Writer
+	select {
+	case writer, _ = <-b.zlibWriters:
+	default:
+		// return a new unmanaged one
+		writer = newZlibWriter()
+	}
+	return writer
+}
+
+// ReleaseZlibWriter accepts a writer (does not have to be one that was cached)
+// only when the cache has room for it. It will ignore it otherwise.
+func (b *BoundedCachedCompressors) ReleaseZlibWriter(w *zlib.Writer) {
+	// forget the unmanaged ones
+	if len(b.zlibWriters) < b.writersCapacity {
+		b.zlibWriters <- w
+	}
+}
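
A one-call sketch of switching to this bounded cache (the capacities are illustrative); SetCompressorProvider is defined in compressors.go below:

```Go
package main

import restful "github.com/emicklei/go-restful"

func main() {
	// Keep at most 20 gzip/zlib writers and 20 gzip readers cached;
	// anything acquired beyond these capacities is created on demand
	// and simply dropped on release.
	restful.SetCompressorProvider(restful.NewBoundedCachedCompressors(20, 20))
}
```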
diff --git a/vendor/github.com/emicklei/go-restful/compressor_pools.go b/vendor/github.com/emicklei/go-restful/compressor_pools.go
new file mode 100644
index 00000000..d866ce64
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/compressor_pools.go
@@ -0,0 +1,91 @@
+package restful
+
+// Copyright 2015 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+	"bytes"
+	"compress/gzip"
+	"compress/zlib"
+	"sync"
+)
+
+// SyncPoolCompessors is a CompressorProvider that uses the standard sync.Pool.
+type SyncPoolCompessors struct {
+	GzipWriterPool *sync.Pool
+	GzipReaderPool *sync.Pool
+	ZlibWriterPool *sync.Pool
+}
+
+// NewSyncPoolCompessors returns a new ("empty") SyncPoolCompessors.
+func NewSyncPoolCompessors() *SyncPoolCompessors {
+	return &SyncPoolCompessors{
+		GzipWriterPool: &sync.Pool{
+			New: func() interface{} { return newGzipWriter() },
+		},
+		GzipReaderPool: &sync.Pool{
+			New: func() interface{} { return newGzipReader() },
+		},
+		ZlibWriterPool: &sync.Pool{
+			New: func() interface{} { return newZlibWriter() },
+		},
+	}
+}
+
+func (s *SyncPoolCompessors) AcquireGzipWriter() *gzip.Writer {
+	return s.GzipWriterPool.Get().(*gzip.Writer)
+}
+
+func (s *SyncPoolCompessors) ReleaseGzipWriter(w *gzip.Writer) {
+	s.GzipWriterPool.Put(w)
+}
+
+func (s *SyncPoolCompessors) AcquireGzipReader() *gzip.Reader {
+	return s.GzipReaderPool.Get().(*gzip.Reader)
+}
+
+func (s *SyncPoolCompessors) ReleaseGzipReader(r *gzip.Reader) {
+	s.GzipReaderPool.Put(r)
+}
+
+func (s *SyncPoolCompessors) AcquireZlibWriter() *zlib.Writer {
+	return s.ZlibWriterPool.Get().(*zlib.Writer)
+}
+
+func (s *SyncPoolCompessors) ReleaseZlibWriter(w *zlib.Writer) {
+	s.ZlibWriterPool.Put(w)
+}
+
+func newGzipWriter() *gzip.Writer {
+	// create with an empty bytes writer; it will be replaced before using the gzipWriter
+	writer, err := gzip.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed)
+	if err != nil {
+		panic(err.Error())
+	}
+	return writer
+}
+
+func newGzipReader() *gzip.Reader {
+	// create with an empty reader (but with GZIP header); it will be replaced before using the gzipReader
+	// we can safely use currentCompressorProvider because it is set on package initialization.
+	w := currentCompressorProvider.AcquireGzipWriter()
+	defer currentCompressorProvider.ReleaseGzipWriter(w)
+	b := new(bytes.Buffer)
+	w.Reset(b)
+	w.Flush()
+	w.Close()
+	reader, err := gzip.NewReader(bytes.NewReader(b.Bytes()))
+	if err != nil {
+		panic(err.Error())
+	}
+	return reader
+}
+
+func newZlibWriter() *zlib.Writer {
+	writer, err := zlib.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed)
+	if err != nil {
+		panic(err.Error())
+	}
+	return writer
+}
diff --git a/vendor/github.com/emicklei/go-restful/compressors.go b/vendor/github.com/emicklei/go-restful/compressors.go
new file mode 100644
index 00000000..9db4a8c8
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/compressors.go
@@ -0,0 +1,54 @@
+package restful
+
+// Copyright 2015 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+	"compress/gzip"
+	"compress/zlib"
+)
+
+// CompressorProvider describes a component that can provide compressors for the standard encodings (gzip and zlib).
+type CompressorProvider interface {
+	// Returns a *gzip.Writer which needs to be released later.
+	// Before using it, call Reset().
+	AcquireGzipWriter() *gzip.Writer
+
+	// Releases an acquired *gzip.Writer.
+	ReleaseGzipWriter(w *gzip.Writer)
+
+	// Returns a *gzip.Reader which needs to be released later.
+	AcquireGzipReader() *gzip.Reader
+
+	// Releases an acquired *gzip.Reader.
+	ReleaseGzipReader(w *gzip.Reader)
+
+	// Returns a *zlib.Writer which needs to be released later.
+	// Before using it, call Reset().
+	AcquireZlibWriter() *zlib.Writer
+
+	// Releases an acquired *zlib.Writer.
+	ReleaseZlibWriter(w *zlib.Writer)
+}
+
+// currentCompressorProvider is the actual provider of compressors (zlib or gzip).
+var currentCompressorProvider CompressorProvider
+
+func init() {
+	currentCompressorProvider = NewSyncPoolCompessors()
+}
+
+// CurrentCompressorProvider returns the current CompressorProvider.
+// It is initialized using a SyncPoolCompessors.
+func CurrentCompressorProvider() CompressorProvider {
+	return currentCompressorProvider
+}
+
+// SetCompressorProvider sets the actual provider of compressors (zlib or gzip).
+func SetCompressorProvider(p CompressorProvider) {
+	if p == nil {
+		panic("cannot set compressor provider to nil")
+	}
+	currentCompressorProvider = p
+}
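
The acquire/reset/release cycle this interface expects looks roughly like the sketch below; the destination buffer and payload are illustrative:

```Go
package main

import (
	"bytes"
	"fmt"

	restful "github.com/emicklei/go-restful"
)

func main() {
	var dst bytes.Buffer
	w := restful.CurrentCompressorProvider().AcquireGzipWriter()
	w.Reset(&dst)            // point the pooled writer at the real destination
	w.Write([]byte("hello")) // data is gzip-compressed into dst
	w.Close()                // flush the gzip stream
	restful.CurrentCompressorProvider().ReleaseGzipWriter(w)
	fmt.Println(dst.Len(), "compressed bytes")
}
```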
diff --git a/vendor/github.com/emicklei/go-restful/constants.go b/vendor/github.com/emicklei/go-restful/constants.go
new file mode 100644
index 00000000..203439c5
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/constants.go
@@ -0,0 +1,30 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+const (
+	MIME_XML   = "application/xml"          // Accept or Content-Type used in Consumes() and/or Produces()
+	MIME_JSON  = "application/json"         // Accept or Content-Type used in Consumes() and/or Produces()
+	MIME_OCTET = "application/octet-stream" // If Content-Type is not present in request, use the default
+
+	HEADER_Allow                         = "Allow"
+	HEADER_Accept                        = "Accept"
+	HEADER_Origin                        = "Origin"
+	HEADER_ContentType                   = "Content-Type"
+	HEADER_LastModified                  = "Last-Modified"
+	HEADER_AcceptEncoding                = "Accept-Encoding"
+	HEADER_ContentEncoding               = "Content-Encoding"
+	HEADER_AccessControlExposeHeaders    = "Access-Control-Expose-Headers"
+	HEADER_AccessControlRequestMethod    = "Access-Control-Request-Method"
+	HEADER_AccessControlRequestHeaders   = "Access-Control-Request-Headers"
+	HEADER_AccessControlAllowMethods     = "Access-Control-Allow-Methods"
+	HEADER_AccessControlAllowOrigin      = "Access-Control-Allow-Origin"
+	HEADER_AccessControlAllowCredentials = "Access-Control-Allow-Credentials"
+	HEADER_AccessControlAllowHeaders     = "Access-Control-Allow-Headers"
+	HEADER_AccessControlMaxAge           = "Access-Control-Max-Age"
+
+	ENCODING_GZIP    = "gzip"
+	ENCODING_DEFLATE = "deflate"
+)
diff --git a/vendor/github.com/emicklei/go-restful/container.go b/vendor/github.com/emicklei/go-restful/container.go
new file mode 100644
index 00000000..061a8d71
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/container.go
@@ -0,0 +1,377 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"net/http"
+	"os"
+	"runtime"
+	"strings"
+	"sync"
+
+	"github.com/emicklei/go-restful/log"
+)
+
+// Container holds a collection of WebServices and a http.ServeMux to dispatch http requests.
+// The requests are further dispatched to routes of WebServices using a RouteSelector
+type Container struct {
+	webServicesLock        sync.RWMutex
+	webServices            []*WebService
+	ServeMux               *http.ServeMux
+	isRegisteredOnRoot     bool
+	containerFilters       []FilterFunction
+	doNotRecover           bool // default is true
+	recoverHandleFunc      RecoverHandleFunction
+	serviceErrorHandleFunc ServiceErrorHandleFunction
+	router                 RouteSelector // default is a CurlyRouter (RouterJSR311 is a slower alternative)
+	contentEncodingEnabled bool          // default is false
+}
+
+// NewContainer creates a new Container using a new ServeMux and default router (CurlyRouter)
+func NewContainer() *Container {
+	return &Container{
+		webServices:            []*WebService{},
+		ServeMux:               http.NewServeMux(),
+		isRegisteredOnRoot:     false,
+		containerFilters:       []FilterFunction{},
+		doNotRecover:           true,
+		recoverHandleFunc:      logStackOnRecover,
+		serviceErrorHandleFunc: writeServiceError,
+		router:                 CurlyRouter{},
+		contentEncodingEnabled: false}
+}
+
+// RecoverHandleFunction declares functions that can be used to handle a panic situation.
+// The first argument is what recover() returns. The second must be used to communicate an error response.
+type RecoverHandleFunction func(interface{}, http.ResponseWriter)
+
+// RecoverHandler changes the default function (logStackOnRecover) to be called
+// when a panic is detected. DoNotRecover must be set to false for this handler to be invoked.
+func (c *Container) RecoverHandler(handler RecoverHandleFunction) {
+	c.recoverHandleFunc = handler
+}
+
+// ServiceErrorHandleFunction declares functions that can be used to handle a service error situation.
+// The first argument is the service error, the second is the request that resulted in the error and
+// the third must be used to communicate an error response.
+type ServiceErrorHandleFunction func(ServiceError, *Request, *Response)
+
+// ServiceErrorHandler changes the default function (writeServiceError) to be called
+// when a ServiceError is detected.
+func (c *Container) ServiceErrorHandler(handler ServiceErrorHandleFunction) {
+	c.serviceErrorHandleFunc = handler
+}
+
+// DoNotRecover controls whether panics will be caught to return HTTP 500.
+// If set to true, Route functions are responsible for handling any error situation.
+// Default value is true.
+func (c *Container) DoNotRecover(doNot bool) {
+	c.doNotRecover = doNot
+}
+
+// Router changes the default Router (currently CurlyRouter)
+func (c *Container) Router(aRouter RouteSelector) {
+	c.router = aRouter
+}
+
+// EnableContentEncoding (default=false) allows for GZIP or DEFLATE encoding of responses.
+func (c *Container) EnableContentEncoding(enabled bool) {
+	c.contentEncodingEnabled = enabled
+}
+
+// Add a WebService to the Container. It will detect duplicate root paths and exit in that case.
+func (c *Container) Add(service *WebService) *Container {
+	c.webServicesLock.Lock()
+	defer c.webServicesLock.Unlock()
+
+	// if rootPath was not set then lazy initialize it
+	if len(service.rootPath) == 0 {
+		service.Path("/")
+	}
+
+	// cannot have duplicate root paths
+	for _, each := range c.webServices {
+		if each.RootPath() == service.RootPath() {
+			log.Printf("WebService with duplicate root path detected:['%v']", each)
+			os.Exit(1)
+		}
+	}
+
+	// If not registered on root then add specific mapping
+	if !c.isRegisteredOnRoot {
+		c.isRegisteredOnRoot = c.addHandler(service, c.ServeMux)
+	}
+	c.webServices = append(c.webServices, service)
+	return c
+}
+
+// addHandler may set a new HandleFunc for the serveMux
+// this function must run inside the critical region protected by the webServicesLock.
+// returns true if the function was registered on root ("/")
+func (c *Container) addHandler(service *WebService, serveMux *http.ServeMux) bool {
+	pattern := fixedPrefixPath(service.RootPath())
+	// check if root path registration is needed
+	if "/" == pattern || "" == pattern {
+		serveMux.HandleFunc("/", c.dispatch)
+		return true
+	}
+	// detect if registration already exists
+	alreadyMapped := false
+	for _, each := range c.webServices {
+		if each.RootPath() == service.RootPath() {
+			alreadyMapped = true
+			break
+		}
+	}
+	if !alreadyMapped {
+		serveMux.HandleFunc(pattern, c.dispatch)
+		if !strings.HasSuffix(pattern, "/") {
+			serveMux.HandleFunc(pattern+"/", c.dispatch)
+		}
+	}
+	return false
+}
+
+func (c *Container) Remove(ws *WebService) error {
+	if c.ServeMux == http.DefaultServeMux {
+		errMsg := fmt.Sprintf("cannot remove a WebService from a Container using the DefaultServeMux: ['%v']", ws)
+		log.Print(errMsg)
+		return errors.New(errMsg)
+	}
+	c.webServicesLock.Lock()
+	defer c.webServicesLock.Unlock()
+	// build a new ServeMux and re-register all WebServices
+	newServeMux := http.NewServeMux()
+	newServices := []*WebService{}
+	newIsRegisteredOnRoot := false
+	for _, each := range c.webServices {
+		if each.rootPath != ws.rootPath {
+			// If not registered on root then add specific mapping
+			if !newIsRegisteredOnRoot {
+				newIsRegisteredOnRoot = c.addHandler(each, newServeMux)
+			}
+			newServices = append(newServices, each)
+		}
+	}
+	c.webServices, c.ServeMux, c.isRegisteredOnRoot = newServices, newServeMux, newIsRegisteredOnRoot
+	return nil
+}
+
+// logStackOnRecover is the default RecoverHandleFunction and is called
+// when DoNotRecover is false and the recoverHandleFunc is not set for the container.
+// Default implementation logs the stacktrace and writes the stacktrace on the response.
+// This may be a security issue as it exposes source code information.
+func logStackOnRecover(panicReason interface{}, httpWriter http.ResponseWriter) {
+	var buffer bytes.Buffer
+	buffer.WriteString(fmt.Sprintf("recover from panic situation: - %v\r\n", panicReason))
+	for i := 2; ; i += 1 {
+		_, file, line, ok := runtime.Caller(i)
+		if !ok {
+			break
+		}
+		buffer.WriteString(fmt.Sprintf("    %s:%d\r\n", file, line))
+	}
+	log.Print(buffer.String())
+	httpWriter.WriteHeader(http.StatusInternalServerError)
+	httpWriter.Write(buffer.Bytes())
+}
+
+// writeServiceError is the default ServiceErrorHandleFunction and is called
+// when a ServiceError is returned during route selection. Default implementation
+// calls resp.WriteErrorString(err.Code, err.Message)
+func writeServiceError(err ServiceError, req *Request, resp *Response) {
+	resp.WriteErrorString(err.Code, err.Message)
+}
+
+// Dispatch the incoming Http Request to a matching WebService.
+func (c *Container) Dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) {
+	if httpWriter == nil {
+		panic("httpWriter cannot be nil")
+	}
+	if httpRequest == nil {
+		panic("httpRequest cannot be nil")
+	}
+	c.dispatch(httpWriter, httpRequest)
+}
+
+// Dispatch the incoming Http Request to a matching WebService.
+func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) {
+	writer := httpWriter
+
+	// CompressingResponseWriter should be closed after all operations are done
+	defer func() {
+		if compressWriter, ok := writer.(*CompressingResponseWriter); ok {
+			compressWriter.Close()
+		}
+	}()
+
+	// Install panic recovery unless told otherwise
+	if !c.doNotRecover { // catch all for 500 response
+		defer func() {
+			if r := recover(); r != nil {
+				c.recoverHandleFunc(r, writer)
+				return
+			}
+		}()
+	}
+
+	// Find best match Route; err is non-nil if no match was found
+	var webService *WebService
+	var route *Route
+	var err error
+	func() {
+		c.webServicesLock.RLock()
+		defer c.webServicesLock.RUnlock()
+		webService, route, err = c.router.SelectRoute(
+			c.webServices,
+			httpRequest)
+	}()
+
+	// Detect if compression is needed
+	// assume without compression, test for override
+	contentEncodingEnabled := c.contentEncodingEnabled
+	if route != nil && route.contentEncodingEnabled != nil {
+		contentEncodingEnabled = *route.contentEncodingEnabled
+	}
+	if contentEncodingEnabled {
+		doCompress, encoding := wantsCompressedResponse(httpRequest)
+		if doCompress {
+			var err error
+			writer, err = NewCompressingResponseWriter(httpWriter, encoding)
+			if err != nil {
+				log.Print("unable to install compressor: ", err)
+				httpWriter.WriteHeader(http.StatusInternalServerError)
+				return
+			}
+		}
+	}
+
+	if err != nil {
+		// a non-200 response has already been written
+		// run container filters anyway ; they should not touch the response...
+		chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) {
+			switch err.(type) {
+			case ServiceError:
+				ser := err.(ServiceError)
+				c.serviceErrorHandleFunc(ser, req, resp)
+			}
+			// TODO
+		}}
+		chain.ProcessFilter(NewRequest(httpRequest), NewResponse(writer))
+		return
+	}
+	pathProcessor, routerProcessesPath := c.router.(PathProcessor)
+	if !routerProcessesPath {
+		pathProcessor = defaultPathProcessor{}
+	}
+	pathParams := pathProcessor.ExtractParameters(route, webService, httpRequest.URL.Path)
+	wrappedRequest, wrappedResponse := route.wrapRequestResponse(writer, httpRequest, pathParams)
+	// pass through filters (if any)
+	if len(c.containerFilters)+len(webService.filters)+len(route.Filters) > 0 {
+		// compose filter chain
+		allFilters := []FilterFunction{}
+		allFilters = append(allFilters, c.containerFilters...)
+		allFilters = append(allFilters, webService.filters...)
+		allFilters = append(allFilters, route.Filters...)
+		chain := FilterChain{Filters: allFilters, Target: func(req *Request, resp *Response) {
+			// handle request by route after passing all filters
+			route.Function(wrappedRequest, wrappedResponse)
+		}}
+		chain.ProcessFilter(wrappedRequest, wrappedResponse)
+	} else {
+		// no filters, handle request by route
+		route.Function(wrappedRequest, wrappedResponse)
+	}
+}
+
+// fixedPrefixPath returns the fixed part of the pathspec; it may include template vars {}
+func fixedPrefixPath(pathspec string) string {
+	varBegin := strings.Index(pathspec, "{")
+	if -1 == varBegin {
+		return pathspec
+	}
+	return pathspec[:varBegin]
+}
+
+// ServeHTTP implements net/http.Handler therefore a Container can be a Handler in a http.Server
+func (c *Container) ServeHTTP(httpwriter http.ResponseWriter, httpRequest *http.Request) {
+	c.ServeMux.ServeHTTP(httpwriter, httpRequest)
+}
+
+// Handle registers the handler for the given pattern. If a handler already exists for pattern, Handle panics.
+func (c *Container) Handle(pattern string, handler http.Handler) {
+	c.ServeMux.Handle(pattern, handler)
+}
+
+// HandleWithFilter registers the handler for the given pattern.
+// Container's filter chain is applied for handler.
+// If a handler already exists for pattern, HandleWithFilter panics.
+func (c *Container) HandleWithFilter(pattern string, handler http.Handler) {
+	f := func(httpResponse http.ResponseWriter, httpRequest *http.Request) {
+		if len(c.containerFilters) == 0 {
+			handler.ServeHTTP(httpResponse, httpRequest)
+			return
+		}
+
+		chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) {
+			handler.ServeHTTP(httpResponse, httpRequest)
+		}}
+		chain.ProcessFilter(NewRequest(httpRequest), NewResponse(httpResponse))
+	}
+
+	c.Handle(pattern, http.HandlerFunc(f))
+}
+
+// Filter appends a container FilterFunction. These are called before dispatching
+// a http.Request to a WebService from the container
+func (c *Container) Filter(filter FilterFunction) {
+	c.containerFilters = append(c.containerFilters, filter)
+}
+
+// RegisteredWebServices returns the collections of added WebServices
+func (c *Container) RegisteredWebServices() []*WebService {
+	c.webServicesLock.RLock()
+	defer c.webServicesLock.RUnlock()
+	result := make([]*WebService, len(c.webServices))
+	for ix := range c.webServices {
+		result[ix] = c.webServices[ix]
+	}
+	return result
+}
+
+// computeAllowedMethods returns a list of HTTP methods that are valid for a Request
+func (c *Container) computeAllowedMethods(req *Request) []string {
+	// Go through all RegisteredWebServices() and all its Routes to collect the options
+	methods := []string{}
+	requestPath := req.Request.URL.Path
+	for _, ws := range c.RegisteredWebServices() {
+		matches := ws.pathExpr.Matcher.FindStringSubmatch(requestPath)
+		if matches != nil {
+			finalMatch := matches[len(matches)-1]
+			for _, rt := range ws.Routes() {
+				matches := rt.pathExpr.Matcher.FindStringSubmatch(finalMatch)
+				if matches != nil {
+					lastMatch := matches[len(matches)-1]
+					if lastMatch == "" || lastMatch == "/" { // only include the method if the remaining match is empty or '/'
+						methods = append(methods, rt.Method)
+					}
+				}
+			}
+		}
+	}
+	// methods = append(methods, "OPTIONS")  not sure about this
+	return methods
+}
+
+// newBasicRequestResponse creates a pair of Request,Response from its http versions.
+// It is basic because no parameter or (produces) content-type information is given.
+func newBasicRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request) (*Request, *Response) {
+	resp := NewResponse(httpWriter)
+	resp.requestAccept = httpRequest.Header.Get(HEADER_Accept)
+	return NewRequest(httpRequest), resp
+}
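
A compact wiring sketch for the Container above (the port, path and handler are illustrative), echoing the examples in doc.go later in this patch:

```Go
package main

import (
	"net/http"

	restful "github.com/emicklei/go-restful"
)

func main() {
	ws := new(restful.WebService)
	ws.Path("/healthz")
	ws.Route(ws.GET("/").To(func(req *restful.Request, resp *restful.Response) {
		resp.Write([]byte("ok"))
	}))

	container := restful.NewContainer()   // own ServeMux and CurlyRouter
	container.EnableContentEncoding(true) // gzip/deflate responses on request
	container.Add(ws)

	// Container implements http.Handler via ServeHTTP.
	server := &http.Server{Addr: ":8081", Handler: container}
	server.ListenAndServe()
}
```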
diff --git a/vendor/github.com/emicklei/go-restful/cors_filter.go b/vendor/github.com/emicklei/go-restful/cors_filter.go
new file mode 100644
index 00000000..1efeef07
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/cors_filter.go
@@ -0,0 +1,202 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+// CrossOriginResourceSharing is used to create a Container Filter that implements CORS.
+// Cross-origin resource sharing (CORS) is a mechanism that allows JavaScript on a web page
+// to make XMLHttpRequests to another domain, not the domain the JavaScript originated from.
+//
+// http://en.wikipedia.org/wiki/Cross-origin_resource_sharing
+// http://enable-cors.org/server.html
+// http://www.html5rocks.com/en/tutorials/cors/#toc-handling-a-not-so-simple-request
+type CrossOriginResourceSharing struct {
+	ExposeHeaders  []string // list of Header names
+	AllowedHeaders []string // list of Header names
+	AllowedDomains []string // list of allowed values for Http Origin. An allowed value can be a regular expression to support subdomain matching. If empty all are allowed.
+	AllowedMethods []string
+	MaxAge         int // number of seconds before requiring new Options request
+	CookiesAllowed bool
+	Container      *Container
+
+	allowedOriginPatterns []*regexp.Regexp // internal field for origin regexp check.
+}
+
+// Filter is a filter function that implements the CORS flow as documented on http://enable-cors.org/server.html
+// and http://www.html5rocks.com/static/images/cors_server_flowchart.png
+func (c CrossOriginResourceSharing) Filter(req *Request, resp *Response, chain *FilterChain) {
+	origin := req.Request.Header.Get(HEADER_Origin)
+	if len(origin) == 0 {
+		if trace {
+			traceLogger.Print("no Http header Origin set")
+		}
+		chain.ProcessFilter(req, resp)
+		return
+	}
+	if !c.isOriginAllowed(origin) { // check whether this origin is allowed
+		if trace {
+			traceLogger.Printf("HTTP Origin:%s is not part of %v, neither matches any part of %v", origin, c.AllowedDomains, c.allowedOriginPatterns)
+		}
+		chain.ProcessFilter(req, resp)
+		return
+	}
+	if req.Request.Method != "OPTIONS" {
+		c.doActualRequest(req, resp)
+		chain.ProcessFilter(req, resp)
+		return
+	}
+	if acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod); acrm != "" {
+		c.doPreflightRequest(req, resp)
+	} else {
+		c.doActualRequest(req, resp)
+		chain.ProcessFilter(req, resp)
+		return
+	}
+}
+
+func (c CrossOriginResourceSharing) doActualRequest(req *Request, resp *Response) {
+	c.setOptionsHeaders(req, resp)
+	// continue processing the response
+}
+
+func (c *CrossOriginResourceSharing) doPreflightRequest(req *Request, resp *Response) {
+	if len(c.AllowedMethods) == 0 {
+		if c.Container == nil {
+			c.AllowedMethods = DefaultContainer.computeAllowedMethods(req)
+		} else {
+			c.AllowedMethods = c.Container.computeAllowedMethods(req)
+		}
+	}
+
+	acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod)
+	if !c.isValidAccessControlRequestMethod(acrm, c.AllowedMethods) {
+		if trace {
+			traceLogger.Printf("Http header %s:%s is not in %v",
+				HEADER_AccessControlRequestMethod,
+				acrm,
+				c.AllowedMethods)
+		}
+		return
+	}
+	acrhs := req.Request.Header.Get(HEADER_AccessControlRequestHeaders)
+	if len(acrhs) > 0 {
+		for _, each := range strings.Split(acrhs, ",") {
+			if !c.isValidAccessControlRequestHeader(strings.Trim(each, " ")) {
+				if trace {
+					traceLogger.Printf("Http header %s:%s is not in %v",
+						HEADER_AccessControlRequestHeaders,
+						acrhs,
+						c.AllowedHeaders)
+				}
+				return
+			}
+		}
+	}
+	resp.AddHeader(HEADER_AccessControlAllowMethods, strings.Join(c.AllowedMethods, ","))
+	resp.AddHeader(HEADER_AccessControlAllowHeaders, acrhs)
+	c.setOptionsHeaders(req, resp)
+
+	// return http 200 response, no body
+}
+
+func (c CrossOriginResourceSharing) setOptionsHeaders(req *Request, resp *Response) {
+	c.checkAndSetExposeHeaders(resp)
+	c.setAllowOriginHeader(req, resp)
+	c.checkAndSetAllowCredentials(resp)
+	if c.MaxAge > 0 {
+		resp.AddHeader(HEADER_AccessControlMaxAge, strconv.Itoa(c.MaxAge))
+	}
+}
+
+func (c CrossOriginResourceSharing) isOriginAllowed(origin string) bool {
+	if len(origin) == 0 {
+		return false
+	}
+	if len(c.AllowedDomains) == 0 {
+		return true
+	}
+
+	allowed := false
+	for _, domain := range c.AllowedDomains {
+		if domain == origin {
+			allowed = true
+			break
+		}
+	}
+
+	if !allowed {
+		if len(c.allowedOriginPatterns) == 0 {
+			// compile allowed domains to allowed origin patterns
+			allowedOriginRegexps, err := compileRegexps(c.AllowedDomains)
+			if err != nil {
+				return false
+			}
+			c.allowedOriginPatterns = allowedOriginRegexps
+		}
+
+		for _, pattern := range c.allowedOriginPatterns {
+			if allowed = pattern.MatchString(origin); allowed {
+				break
+			}
+		}
+	}
+
+	return allowed
+}
+
+func (c CrossOriginResourceSharing) setAllowOriginHeader(req *Request, resp *Response) {
+	origin := req.Request.Header.Get(HEADER_Origin)
+	if c.isOriginAllowed(origin) {
+		resp.AddHeader(HEADER_AccessControlAllowOrigin, origin)
+	}
+}
+
+func (c CrossOriginResourceSharing) checkAndSetExposeHeaders(resp *Response) {
+	if len(c.ExposeHeaders) > 0 {
+		resp.AddHeader(HEADER_AccessControlExposeHeaders, strings.Join(c.ExposeHeaders, ","))
+	}
+}
+
+func (c CrossOriginResourceSharing) checkAndSetAllowCredentials(resp *Response) {
+	if c.CookiesAllowed {
+		resp.AddHeader(HEADER_AccessControlAllowCredentials, "true")
+	}
+}
+
+func (c CrossOriginResourceSharing) isValidAccessControlRequestMethod(method string, allowedMethods []string) bool {
+	for _, each := range allowedMethods {
+		if each == method {
+			return true
+		}
+	}
+	return false
+}
+
+func (c CrossOriginResourceSharing) isValidAccessControlRequestHeader(header string) bool {
+	for _, each := range c.AllowedHeaders {
+		if strings.ToLower(each) == strings.ToLower(header) {
+			return true
+		}
+	}
+	return false
+}
+
+// Take a list of strings and compile them into a list of regular expressions.
+func compileRegexps(regexpStrings []string) ([]*regexp.Regexp, error) {
+	regexps := []*regexp.Regexp{}
+	for _, regexpStr := range regexpStrings {
+		r, err := regexp.Compile(regexpStr)
+		if err != nil {
+			return regexps, err
+		}
+		regexps = append(regexps, r)
+	}
+	return regexps, nil
+}
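
An installation sketch for this filter (the domains and headers are illustrative), in line with the CORS example in doc.go below:

```Go
package main

import restful "github.com/emicklei/go-restful"

func main() {
	cors := restful.CrossOriginResourceSharing{
		ExposeHeaders:  []string{"X-My-Header"},
		AllowedHeaders: []string{"Content-Type", "Accept"},
		AllowedDomains: []string{"https://example.com"}, // exact origin or regular expression
		CookiesAllowed: false,
		Container:      restful.DefaultContainer,
	}
	restful.DefaultContainer.Filter(cors.Filter)
	// Answer preflight OPTIONS requests automatically (see doc.go).
	restful.DefaultContainer.Filter(restful.OPTIONSFilter())
}
```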
diff --git a/vendor/github.com/emicklei/go-restful/coverage.sh b/vendor/github.com/emicklei/go-restful/coverage.sh
new file mode 100644
index 00000000..e27dbf1a
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/coverage.sh
@@ -0,0 +1,2 @@
+go test -coverprofile=coverage.out
+go tool cover -html=coverage.out
\ No newline at end of file
diff --git a/vendor/github.com/emicklei/go-restful/curly.go b/vendor/github.com/emicklei/go-restful/curly.go
new file mode 100644
index 00000000..14d5b76b
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/curly.go
@@ -0,0 +1,164 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+	"net/http"
+	"regexp"
+	"sort"
+	"strings"
+)
+
+// CurlyRouter expects Routes with paths that contain zero or more parameters in curly brackets.
+type CurlyRouter struct{}
+
+// SelectRoute is part of the Router interface and returns the best match
+// for the WebService and its Route for the given Request.
+func (c CurlyRouter) SelectRoute(
+	webServices []*WebService,
+	httpRequest *http.Request) (selectedService *WebService, selected *Route, err error) {
+
+	requestTokens := tokenizePath(httpRequest.URL.Path)
+
+	detectedService := c.detectWebService(requestTokens, webServices)
+	if detectedService == nil {
+		if trace {
+			traceLogger.Printf("no WebService was found to match URL path:%s\n", httpRequest.URL.Path)
+		}
+		return nil, nil, NewError(http.StatusNotFound, "404: Page Not Found")
+	}
+	candidateRoutes := c.selectRoutes(detectedService, requestTokens)
+	if len(candidateRoutes) == 0 {
+		if trace {
+			traceLogger.Printf("no Route in WebService with path %s was found to match URL path:%s\n", detectedService.rootPath, httpRequest.URL.Path)
+		}
+		return detectedService, nil, NewError(http.StatusNotFound, "404: Page Not Found")
+	}
+	selectedRoute, err := c.detectRoute(candidateRoutes, httpRequest)
+	if selectedRoute == nil {
+		return detectedService, nil, err
+	}
+	return detectedService, selectedRoute, nil
+}
+
+// selectRoutes returns a collection of Route from a WebService that matches the path tokens from the request.
+func (c CurlyRouter) selectRoutes(ws *WebService, requestTokens []string) sortableCurlyRoutes {
+	candidates := make(sortableCurlyRoutes, 0, 8)
+	for _, each := range ws.routes {
+		matches, paramCount, staticCount := c.matchesRouteByPathTokens(each.pathParts, requestTokens)
+		if matches {
+			candidates.add(curlyRoute{each, paramCount, staticCount}) // TODO make sure Routes() return pointers?
+		}
+	}
+	sort.Sort(candidates)
+	return candidates
+}
+
+// matchesRouteByPathTokens computes whether the route matches, how many parameters match, and how many static path elements there are.
+func (c CurlyRouter) matchesRouteByPathTokens(routeTokens, requestTokens []string) (matches bool, paramCount int, staticCount int) {
+	if len(routeTokens) < len(requestTokens) {
+		// proceed in matching only if last routeToken is wildcard
+		count := len(routeTokens)
+		if count == 0 || !strings.HasSuffix(routeTokens[count-1], "*}") {
+			return false, 0, 0
+		}
+		// proceed
+	}
+	for i, routeToken := range routeTokens {
+		if i == len(requestTokens) {
+			// reached end of request path
+			return false, 0, 0
+		}
+		requestToken := requestTokens[i]
+		if strings.HasPrefix(routeToken, "{") {
+			paramCount++
+			if colon := strings.Index(routeToken, ":"); colon != -1 {
+				// match by regex
+				matchesToken, matchesRemainder := c.regularMatchesPathToken(routeToken, colon, requestToken)
+				if !matchesToken {
+					return false, 0, 0
+				}
+				if matchesRemainder {
+					break
+				}
+			}
+		} else { // no { prefix
+			if requestToken != routeToken {
+				return false, 0, 0
+			}
+			staticCount++
+		}
+	}
+	return true, paramCount, staticCount
+}
+
+// regularMatchesPathToken tests whether the regular expression part of routeToken matches the requestToken or all remaining tokens
+// format routeToken is {someVar:someExpression}, e.g. {zipcode:[\d][\d][\d][\d][A-Z][A-Z]}
+func (c CurlyRouter) regularMatchesPathToken(routeToken string, colon int, requestToken string) (matchesToken bool, matchesRemainder bool) {
+	regPart := routeToken[colon+1 : len(routeToken)-1]
+	if regPart == "*" {
+		if trace {
+			traceLogger.Printf("wildcard parameter detected in route token %s that matches %s\n", routeToken, requestToken)
+		}
+		return true, true
+	}
+	matched, err := regexp.MatchString(regPart, requestToken)
+	return (matched && err == nil), false
+}
+
+var jsr311Router = RouterJSR311{}
+
+// detectRoute selects from a list of Route the first match by inspecting both the Accept and Content-Type
+// headers of the Request. See also RouterJSR311 in jsr311.go
+func (c CurlyRouter) detectRoute(candidateRoutes sortableCurlyRoutes, httpRequest *http.Request) (*Route, error) {
+	// tracing is done inside detectRoute
+	return jsr311Router.detectRoute(candidateRoutes.routes(), httpRequest)
+}
+
+// detectWebService returns the best matching webService given the list of path tokens.
+// see also computeWebserviceScore
+func (c CurlyRouter) detectWebService(requestTokens []string, webServices []*WebService) *WebService {
+	var best *WebService
+	score := -1
+	for _, each := range webServices {
+		matches, eachScore := c.computeWebserviceScore(requestTokens, each.pathExpr.tokens)
+		if matches && (eachScore > score) {
+			best = each
+			score = eachScore
+		}
+	}
+	return best
+}
+
+// computeWebserviceScore returns whether tokens match and
+// the weighted score of the longest matching consecutive tokens from the beginning.
+func (c CurlyRouter) computeWebserviceScore(requestTokens []string, tokens []string) (bool, int) {
+	if len(tokens) > len(requestTokens) {
+		return false, 0
+	}
+	score := 0
+	for i := 0; i < len(tokens); i++ {
+		each := requestTokens[i]
+		other := tokens[i]
+		if len(each) == 0 && len(other) == 0 {
+			score++
+			continue
+		}
+		if len(other) > 0 && strings.HasPrefix(other, "{") {
+			// no empty match
+			if len(each) == 0 {
+				return false, score
+			}
+			score += 1
+		} else {
+			// not a parameter
+			if each != other {
+				return false, score
+			}
+			score += (len(tokens) - i) * 10 //fuzzy
+		}
+	}
+	return true, score
+}
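
A sketch of the route shapes this router is meant for (the paths are illustrative; the regexp form follows the package documentation in doc.go below):

```Go
package main

import restful "github.com/emicklei/go-restful"

func static(req *restful.Request, resp *restful.Response) {
	// {subpath:*} matches the remainder of the URL path after /static/
	resp.Write([]byte(req.PathParameter("subpath")))
}

func zip(req *restful.Request, resp *restful.Response) {
	resp.Write([]byte(req.PathParameter("zipcode")))
}

func main() {
	ws := new(restful.WebService)
	ws.Path("/static")
	ws.Route(ws.GET("/{subpath:*}").To(static)) // wildcard tail parameter
	// regexp-constrained parameter, e.g. 1234AB
	ws.Route(ws.GET("/zip/{zipcode:[0-9][0-9][0-9][0-9][A-Z][A-Z]}").To(zip))
	restful.Add(ws) // CurlyRouter is the container's default router
}
```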
diff --git a/vendor/github.com/emicklei/go-restful/curly_route.go b/vendor/github.com/emicklei/go-restful/curly_route.go
new file mode 100644
index 00000000..403dd3be
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/curly_route.go
@@ -0,0 +1,54 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+// curlyRoute exists for sorting Routes by the CurlyRouter based on number of parameters and number of static path elements.
+type curlyRoute struct {
+	route       Route
+	paramCount  int
+	staticCount int
+}
+
+// sortableCurlyRoutes orders by most parameters and path elements first.
+type sortableCurlyRoutes []curlyRoute
+
+func (s *sortableCurlyRoutes) add(route curlyRoute) {
+	*s = append(*s, route)
+}
+
+func (s sortableCurlyRoutes) routes() (routes []Route) {
+	routes = make([]Route, 0, len(s))
+	for _, each := range s {
+		routes = append(routes, each.route) // TODO change return type
+	}
+	return routes
+}
+
+func (s sortableCurlyRoutes) Len() int {
+	return len(s)
+}
+func (s sortableCurlyRoutes) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+func (s sortableCurlyRoutes) Less(i, j int) bool {
+	a := s[j]
+	b := s[i]
+
+	// primary key
+	if a.staticCount < b.staticCount {
+		return true
+	}
+	if a.staticCount > b.staticCount {
+		return false
+	}
+	// secondary key
+	if a.paramCount < b.paramCount {
+		return true
+	}
+	if a.paramCount > b.paramCount {
+		return false
+	}
+	return a.route.Path < b.route.Path
+}
diff --git a/vendor/github.com/emicklei/go-restful/doc.go b/vendor/github.com/emicklei/go-restful/doc.go
new file mode 100644
index 00000000..f7c16b01
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/doc.go
@@ -0,0 +1,185 @@
+/*
+Package restful , a lean package for creating REST-style WebServices without magic.
+
+WebServices and Routes
+
+A WebService has a collection of Route objects that dispatch incoming Http Requests to function calls.
+Typically, a WebService has a root path (e.g. /users) and defines common MIME types for its routes.
+WebServices must be added to a container (see below) in order to handle Http requests from a server.
+
+A Route is defined by an HTTP method, a URL path and (optionally) the MIME types it consumes (Content-Type) and produces (Accept).
+This package has the logic to find the best matching Route and if found, call its Function.
+
+	ws := new(restful.WebService)
+	ws.
+		Path("/users").
+		Consumes(restful.MIME_JSON, restful.MIME_XML).
+		Produces(restful.MIME_JSON, restful.MIME_XML)
+
+	ws.Route(ws.GET("/{user-id}").To(u.findUser))  // u is a UserResource
+
+	...
+
+	// GET http://localhost:8080/users/1
+	func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
+		id := request.PathParameter("user-id")
+		...
+	}
+
+The (*Request, *Response) arguments provide functions for reading information from the request and writing information back to the response.
+
+See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-user-resource.go with a full implementation.
+
+Regular expression matching Routes
+
+A Route parameter can be specified using the format "uri/{var[:regexp]}" or the special version "uri/{var:*}" for matching the tail of the path.
+For example, /persons/{name:[A-Z][A-Z]} can be used to restrict values for the parameter "name" to only contain capital alphabetic characters.
+Regular expressions must use the standard Go syntax as described in the regexp package. (https://code.google.com/p/re2/wiki/Syntax)
+This feature requires the use of a CurlyRouter.
+
+Containers
+
+A Container holds a collection of WebServices, Filters and a http.ServeMux for multiplexing http requests.
+Using the statements "restful.Add(...) and restful.Filter(...)" will register WebServices and Filters to the Default Container.
+The Default container of go-restful uses the http.DefaultServeMux.
+You can create your own Container and create a new http.Server for that particular container.
+
+	container := restful.NewContainer()
+	server := &http.Server{Addr: ":8081", Handler: container}
+
+Filters
+
+A filter dynamically intercepts requests and responses to transform or use the information contained in the requests or responses.
+You can use filters to perform generic logging, measurement, authentication, redirect, set response headers etc.
+In the restful package there are three hooks into the request,response flow where filters can be added.
+Each filter must define a FilterFunction:
+
+	func (req *restful.Request, resp *restful.Response, chain *restful.FilterChain)
+
+Use the following statement to pass the request,response pair to the next filter or RouteFunction
+
+	chain.ProcessFilter(req, resp)
+
+Container Filters
+
+These are processed before any registered WebService.
+
+	// install a (global) filter for the default container (processed before any webservice)
+	restful.Filter(globalLogging)
+
+WebService Filters
+
+These are processed before any Route of a WebService.
+
+	// install a webservice filter (processed before any route)
+	ws.Filter(webserviceLogging).Filter(measureTime)
+
+
+Route Filters
+
+These are processed before calling the function associated with the Route.
+
+	// install 2 chained route filters (processed before calling findUser)
+	ws.Route(ws.GET("/{user-id}").Filter(routeLogging).Filter(NewCountFilter().routeCounter).To(findUser))
+
+See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-filters.go with full implementations.
+
+Response Encoding
+
+Two encodings are supported: gzip and deflate. To enable this for all responses:
+
+	restful.DefaultContainer.EnableContentEncoding(true)
+
+If a Http request includes the Accept-Encoding header then the response content will be compressed using the specified encoding.
+Alternatively, you can create a Filter that performs the encoding and install it per WebService or Route.
+
+See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-encoding-filter.go
+
+OPTIONS support
+
+By installing a pre-defined container filter, your Webservice(s) can respond to the OPTIONS Http request.
+
+	Filter(OPTIONSFilter())
+
+CORS
+
+By installing the filter of a CrossOriginResourceSharing (CORS), your WebService(s) can handle CORS requests.
+
+	cors := CrossOriginResourceSharing{ExposeHeaders: []string{"X-My-Header"}, CookiesAllowed: false, Container: DefaultContainer}
+	Filter(cors.Filter)
+
+Error Handling
+
+Unexpected things happen. If a request cannot be processed because of a failure, your service needs to tell via the response what happened and why.
+For this reason HTTP status codes exist and it is important to use the correct code in every exceptional situation.
+
+	400: Bad Request
+
+If path or query parameters are not valid (content or type) then use http.StatusBadRequest.
+
+	404: Not Found
+
+Despite a valid URI, the resource requested may not be available
+
+	500: Internal Server Error
+
+If the application logic could not process the request (or write the response) then use http.StatusInternalServerError.
+
+	405: Method Not Allowed
+
+The request has a valid URL but the method (GET,PUT,POST,...) is not allowed.
+
+	406: Not Acceptable
+
+The request does not have or has an unknown Accept Header set for this operation.
+
+	415: Unsupported Media Type
+
+The request does not have or has an unknown Content-Type Header set for this operation.
+
+ServiceError
+
+In addition to setting the correct (error) Http status code, you can choose to write a ServiceError message on the response.
+
+Performance options
+
+This package has several options that affect the performance of your service. It is important to understand them and how you can change it.
+
+	restful.DefaultContainer.DoNotRecover(false)
+
+DoNotRecover controls whether panics will be caught to return HTTP 500.
+If set to false, the container will recover from panics.
+Default value is true
+
+	restful.SetCompressorProvider(NewBoundedCachedCompressors(20, 20))
+
+If content encoding is enabled then the default strategy for getting new gzip/zlib writers and readers is to use a sync.Pool.
+Because writers are expensive structures, performance improves further when using a preloaded cache. You can also inject your own implementation.
+
+Trouble shooting
+
+This package has the means to produce detailed logging of the complete Http request matching process and filter invocation.
+Enabling this feature requires you to set a restful.StdLogger implementation (e.g. a log.Logger instance) such as:
+
+	restful.TraceLogger(log.New(os.Stdout, "[restful] ", log.LstdFlags|log.Lshortfile))
+
+Logging
+
+The restful.SetLogger() method allows you to override the logger used by the package. By default restful
+uses the standard library `log` package and logs to stdout. Different logging packages are supported as
+long as they conform to the `StdLogger` interface defined in the `log` sub-package; writing an adapter for your
+preferred package is simple.
+
+Resources
+
+[project]: https://github.com/emicklei/go-restful
+
+[examples]: https://github.com/emicklei/go-restful/blob/master/examples
+
+[design]:  http://ernestmicklei.com/2012/11/11/go-restful-api-design/
+
+[showcases]: https://github.com/emicklei/mora, https://github.com/emicklei/landskape
+
+(c) 2012-2015, http://ernestmicklei.com. MIT License
+*/
+package restful
diff --git a/vendor/github.com/emicklei/go-restful/entity_accessors.go b/vendor/github.com/emicklei/go-restful/entity_accessors.go
new file mode 100644
index 00000000..66dfc824
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/entity_accessors.go
@@ -0,0 +1,162 @@
+package restful
+
+// Copyright 2015 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+	"encoding/xml"
+	"strings"
+	"sync"
+)
+
+// EntityReaderWriter can read and write values using an encoding such as JSON or XML.
+type EntityReaderWriter interface {
+	// Read a serialized version of the value from the request.
+	// The Request may have a decompressing reader. Depends on Content-Encoding.
+	Read(req *Request, v interface{}) error
+
+	// Write a serialized version of the value on the response.
+	// The Response may have a compressing writer. Depends on Accept-Encoding.
+	// status should be a valid Http Status code
+	Write(resp *Response, status int, v interface{}) error
+}
+
+// entityAccessRegistry is a singleton
+var entityAccessRegistry = &entityReaderWriters{
+	protection: new(sync.RWMutex),
+	accessors:  map[string]EntityReaderWriter{},
+}
+
+// entityReaderWriters associates MIME to an EntityReaderWriter
+type entityReaderWriters struct {
+	protection *sync.RWMutex
+	accessors  map[string]EntityReaderWriter
+}
+
+func init() {
+	RegisterEntityAccessor(MIME_JSON, NewEntityAccessorJSON(MIME_JSON))
+	RegisterEntityAccessor(MIME_XML, NewEntityAccessorXML(MIME_XML))
+}
+
+// RegisterEntityAccessor adds or overrides the ReaderWriter for encoding content with this MIME type.
+func RegisterEntityAccessor(mime string, erw EntityReaderWriter) {
+	entityAccessRegistry.protection.Lock()
+	defer entityAccessRegistry.protection.Unlock()
+	entityAccessRegistry.accessors[mime] = erw
+}
+
+// NewEntityAccessorJSON returns a new EntityReaderWriter for accessing JSON content.
+// This package is already initialized with such an accessor using the MIME_JSON contentType.
+func NewEntityAccessorJSON(contentType string) EntityReaderWriter {
+	return entityJSONAccess{ContentType: contentType}
+}
+
+// NewEntityAccessorXML returns a new EntityReaderWriter for accessing XML content.
+// This package is already initialized with such an accessor using the MIME_XML contentType.
+func NewEntityAccessorXML(contentType string) EntityReaderWriter {
+	return entityXMLAccess{ContentType: contentType}
+}
+
+// accessorAt returns the registered ReaderWriter for this MIME type.
+func (r *entityReaderWriters) accessorAt(mime string) (EntityReaderWriter, bool) {
+	r.protection.RLock()
+	defer r.protection.RUnlock()
+	er, ok := r.accessors[mime]
+	if !ok {
+		// retry with reverse lookup
+		// more expensive but we are in an exceptional situation anyway
+		for k, v := range r.accessors {
+			if strings.Contains(mime, k) {
+				return v, true
+			}
+		}
+	}
+	return er, ok
+}
+
+// entityXMLAccess is a EntityReaderWriter for XML encoding
+type entityXMLAccess struct {
+	// This is used for setting the Content-Type header when writing
+	ContentType string
+}
+
+// Read unmarshals the value from XML
+func (e entityXMLAccess) Read(req *Request, v interface{}) error {
+	return xml.NewDecoder(req.Request.Body).Decode(v)
+}
+
+// Write marshals the value to XML and sets the Content-Type header.
+func (e entityXMLAccess) Write(resp *Response, status int, v interface{}) error {
+	return writeXML(resp, status, e.ContentType, v)
+}
+
+// writeXML marshals the value to XML and sets the Content-Type header.
+func writeXML(resp *Response, status int, contentType string, v interface{}) error {
+	if v == nil {
+		resp.WriteHeader(status)
+		// do not write a nil representation
+		return nil
+	}
+	if resp.prettyPrint {
+		// pretty output must be created and written explicitly
+		output, err := xml.MarshalIndent(v, " ", " ")
+		if err != nil {
+			return err
+		}
+		resp.Header().Set(HEADER_ContentType, contentType)
+		resp.WriteHeader(status)
+		_, err = resp.Write([]byte(xml.Header))
+		if err != nil {
+			return err
+		}
+		_, err = resp.Write(output)
+		return err
+	}
+	// not-so-pretty
+	resp.Header().Set(HEADER_ContentType, contentType)
+	resp.WriteHeader(status)
+	return xml.NewEncoder(resp).Encode(v)
+}
+
+// entityJSONAccess is an EntityReaderWriter for JSON encoding
+type entityJSONAccess struct {
+	// This is used for setting the Content-Type header when writing
+	ContentType string
+}
+
+// Read unmarshals the value from JSON
+func (e entityJSONAccess) Read(req *Request, v interface{}) error {
+	decoder := NewDecoder(req.Request.Body)
+	decoder.UseNumber()
+	return decoder.Decode(v)
+}
+
+// Write marshals the value to JSON and sets the Content-Type header.
+func (e entityJSONAccess) Write(resp *Response, status int, v interface{}) error {
+	return writeJSON(resp, status, e.ContentType, v)
+}
+
+// writeJSON marshals the value to JSON and sets the Content-Type header.
+func writeJSON(resp *Response, status int, contentType string, v interface{}) error {
+	if v == nil {
+		resp.WriteHeader(status)
+		// do not write a nil representation
+		return nil
+	}
+	if resp.prettyPrint {
+		// pretty output must be created and written explicitly
+		output, err := MarshalIndent(v, "", " ")
+		if err != nil {
+			return err
+		}
+		resp.Header().Set(HEADER_ContentType, contentType)
+		resp.WriteHeader(status)
+		_, err = resp.Write(output)
+		return err
+	}
+	// not-so-pretty
+	resp.Header().Set(HEADER_ContentType, contentType)
+	resp.WriteHeader(status)
+	return NewEncoder(resp).Encode(v)
+}
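RegisterEntityAccessor above allows additional encodings to be plugged in next to the built-in JSON and XML accessors. A hedged sketch of a text/plain accessor follows; the `plainTextAccess` type and the MIME string are assumptions for illustration, not part of the vendored code.

```go
package main

import (
	"fmt"
	"io/ioutil"

	restful "github.com/emicklei/go-restful"
)

// plainTextAccess implements restful.EntityReaderWriter for text/plain bodies.
type plainTextAccess struct{}

// Read copies the raw request body into a *string target.
func (plainTextAccess) Read(req *restful.Request, v interface{}) error {
	b, err := ioutil.ReadAll(req.Request.Body)
	if err != nil {
		return err
	}
	s, ok := v.(*string)
	if !ok {
		return fmt.Errorf("plainTextAccess expects a *string, got %T", v)
	}
	*s = string(b)
	return nil
}

// Write sets the Content-Type header, writes the status and prints the value.
func (plainTextAccess) Write(resp *restful.Response, status int, v interface{}) error {
	resp.Header().Set("Content-Type", "text/plain")
	resp.WriteHeader(status)
	_, err := fmt.Fprint(resp, v)
	return err
}

func main() {
	restful.RegisterEntityAccessor("text/plain", plainTextAccess{})
}
```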
diff --git a/vendor/github.com/emicklei/go-restful/filter.go b/vendor/github.com/emicklei/go-restful/filter.go
new file mode 100644
index 00000000..c23bfb59
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/filter.go
@@ -0,0 +1,35 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+// FilterChain is a request scoped object to process one or more filters before calling the target RouteFunction.
+type FilterChain struct {
+	Filters []FilterFunction // ordered list of FilterFunction
+	Index   int              // index into filters that is currently in progress
+	Target  RouteFunction    // function to call after passing all filters
+}
+
+// ProcessFilter passes the request and response pair on to the next of the Filters.
+// Each filter can decide to proceed to the next Filter or handle the Response itself.
+func (f *FilterChain) ProcessFilter(request *Request, response *Response) {
+	if f.Index < len(f.Filters) {
+		f.Index++
+		f.Filters[f.Index-1](request, response, f)
+	} else {
+		f.Target(request, response)
+	}
+}
+
+// FilterFunction definitions must call ProcessFilter on the FilterChain to pass control on and eventually invoke the RouteFunction
+type FilterFunction func(*Request, *Response, *FilterChain)
+
+// NoBrowserCacheFilter is a filter function to set HTTP headers that disable browser caching
+// See examples/restful-no-cache-filter.go for usage
+func NoBrowserCacheFilter(req *Request, resp *Response, chain *FilterChain) {
+	resp.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate") // HTTP 1.1.
+	resp.Header().Set("Pragma", "no-cache")                                   // HTTP 1.0.
+	resp.Header().Set("Expires", "0")                                         // Proxies.
+	chain.ProcessFilter(req, resp)
+}
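A custom filter only has to match the FilterFunction signature and call ProcessFilter to hand control to the rest of the chain. A hedged sketch of a latency-measuring filter; the package-level `restful.Filter` used for installation is assumed from the wider library surface, since only FilterChain and FilterFunction are defined in this file.

```go
package main

import (
	stdlog "log"
	"time"

	restful "github.com/emicklei/go-restful"
)

// timing measures how long the remaining filters plus the RouteFunction take.
func timing(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
	start := time.Now()
	chain.ProcessFilter(req, resp) // hand control to the next filter / the target
	stdlog.Printf("%s %s took %v", req.Request.Method, req.Request.URL.Path, time.Since(start))
}

func main() {
	// Installation on the default container (restful.Filter is assumed here).
	restful.Filter(timing)
	restful.Filter(restful.NoBrowserCacheFilter)
}
```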
diff --git a/vendor/github.com/emicklei/go-restful/json.go b/vendor/github.com/emicklei/go-restful/json.go
new file mode 100644
index 00000000..87116516
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/json.go
@@ -0,0 +1,11 @@
+// +build !jsoniter
+
+package restful
+
+import "encoding/json"
+
+var (
+	MarshalIndent = json.MarshalIndent
+	NewDecoder    = json.NewDecoder
+	NewEncoder    = json.NewEncoder
+)
diff --git a/vendor/github.com/emicklei/go-restful/jsoniter.go b/vendor/github.com/emicklei/go-restful/jsoniter.go
new file mode 100644
index 00000000..11b8f8ae
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/jsoniter.go
@@ -0,0 +1,12 @@
+// +build jsoniter
+
+package restful
+
+import "github.com/json-iterator/go"
+
+var (
+	json          = jsoniter.ConfigCompatibleWithStandardLibrary
+	MarshalIndent = json.MarshalIndent
+	NewDecoder    = json.NewDecoder
+	NewEncoder    = json.NewEncoder
+)
diff --git a/vendor/github.com/emicklei/go-restful/jsr311.go b/vendor/github.com/emicklei/go-restful/jsr311.go
new file mode 100644
index 00000000..3ede1891
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/jsr311.go
@@ -0,0 +1,297 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+	"sort"
+)
+
+// RouterJSR311 implements the flow for matching Requests to Routes (and consequently Resource Functions)
+// as specified by the JSR311 http://jsr311.java.net/nonav/releases/1.1/spec/spec.html.
+// RouterJSR311 implements the Router interface.
+// The concept of locators is not implemented.
+type RouterJSR311 struct{}
+
+// SelectRoute is part of the Router interface and returns the best match
+// for the WebService and its Route for the given Request.
+func (r RouterJSR311) SelectRoute(
+	webServices []*WebService,
+	httpRequest *http.Request) (selectedService *WebService, selectedRoute *Route, err error) {
+
+	// Identify the root resource class (WebService)
+	dispatcher, finalMatch, err := r.detectDispatcher(httpRequest.URL.Path, webServices)
+	if err != nil {
+		return nil, nil, NewError(http.StatusNotFound, "")
+	}
+	// Obtain the set of candidate methods (Routes)
+	routes := r.selectRoutes(dispatcher, finalMatch)
+	if len(routes) == 0 {
+		return dispatcher, nil, NewError(http.StatusNotFound, "404: Page Not Found")
+	}
+
+	// Identify the method (Route) that will handle the request
+	route, ok := r.detectRoute(routes, httpRequest)
+	return dispatcher, route, ok
+}
+
+// ExtractParameters is used to obtain the path parameters from the route using the same matching
+// engine as the JSR 311 router.
+func (r RouterJSR311) ExtractParameters(route *Route, webService *WebService, urlPath string) map[string]string {
+	webServiceExpr := webService.pathExpr
+	webServiceMatches := webServiceExpr.Matcher.FindStringSubmatch(urlPath)
+	pathParameters := r.extractParams(webServiceExpr, webServiceMatches)
+	routeExpr := route.pathExpr
+	routeMatches := routeExpr.Matcher.FindStringSubmatch(webServiceMatches[len(webServiceMatches)-1])
+	routeParams := r.extractParams(routeExpr, routeMatches)
+	for key, value := range routeParams {
+		pathParameters[key] = value
+	}
+	return pathParameters
+}
+
+func (RouterJSR311) extractParams(pathExpr *pathExpression, matches []string) map[string]string {
+	params := map[string]string{}
+	for i := 1; i < len(matches); i++ {
+		if len(pathExpr.VarNames) >= i {
+			params[pathExpr.VarNames[i-1]] = matches[i]
+		}
+	}
+	return params
+}
+
+// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2
+func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*Route, error) {
+	candidates := make([]*Route, 0, 8)
+	for i, each := range routes {
+		ok := true
+		for _, fn := range each.If {
+			if !fn(httpRequest) {
+				ok = false
+				break
+			}
+		}
+		if ok {
+			candidates = append(candidates, &routes[i])
+		}
+	}
+	if len(candidates) == 0 {
+		if trace {
+			traceLogger.Printf("no Route found (from %d) that passes conditional checks", len(routes))
+		}
+		return nil, NewError(http.StatusNotFound, "404: Not Found")
+	}
+
+	// http method
+	previous := candidates
+	candidates = candidates[:0]
+	for _, each := range previous {
+		if httpRequest.Method == each.Method {
+			candidates = append(candidates, each)
+		}
+	}
+	if len(candidates) == 0 {
+		if trace {
+			traceLogger.Printf("no Route found (in %d routes) that matches HTTP method %s\n", len(previous), httpRequest.Method)
+		}
+		return nil, NewError(http.StatusMethodNotAllowed, "405: Method Not Allowed")
+	}
+
+	// content-type
+	contentType := httpRequest.Header.Get(HEADER_ContentType)
+	previous = candidates
+	candidates = candidates[:0]
+	for _, each := range previous {
+		if each.matchesContentType(contentType) {
+			candidates = append(candidates, each)
+		}
+	}
+	if len(candidates) == 0 {
+		if trace {
+			traceLogger.Printf("no Route found (from %d) that matches HTTP Content-Type: %s\n", len(previous), contentType)
+		}
+		if httpRequest.ContentLength > 0 {
+			return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type")
+		}
+	}
+
+	// accept
+	previous = candidates
+	candidates = candidates[:0]
+	accept := httpRequest.Header.Get(HEADER_Accept)
+	if len(accept) == 0 {
+		accept = "*/*"
+	}
+	for _, each := range previous {
+		if each.matchesAccept(accept) {
+			candidates = append(candidates, each)
+		}
+	}
+	if len(candidates) == 0 {
+		if trace {
+			traceLogger.Printf("no Route found (from %d) that matches HTTP Accept: %s\n", len(previous), accept)
+		}
+		return nil, NewError(http.StatusNotAcceptable, "406: Not Acceptable")
+	}
+	// return r.bestMatchByMedia(outputMediaOk, contentType, accept), nil
+	return candidates[0], nil
+}
+
+// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2
+// n/m > n/* > */*
+func (r RouterJSR311) bestMatchByMedia(routes []Route, contentType string, accept string) *Route {
+	// TODO
+	return &routes[0]
+}
+
+// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2  (step 2)
+func (r RouterJSR311) selectRoutes(dispatcher *WebService, pathRemainder string) []Route {
+	filtered := &sortableRouteCandidates{}
+	for _, each := range dispatcher.Routes() {
+		pathExpr := each.pathExpr
+		matches := pathExpr.Matcher.FindStringSubmatch(pathRemainder)
+		if matches != nil {
+			lastMatch := matches[len(matches)-1]
+			if len(lastMatch) == 0 || lastMatch == "/" { // only include when the remaining match is empty or "/"
+				filtered.candidates = append(filtered.candidates,
+					routeCandidate{each, len(matches) - 1, pathExpr.LiteralCount, pathExpr.VarCount})
+			}
+		}
+	}
+	if len(filtered.candidates) == 0 {
+		if trace {
+			traceLogger.Printf("WebService on path %s has no routes that match URL path remainder:%s\n", dispatcher.rootPath, pathRemainder)
+		}
+		return []Route{}
+	}
+	sort.Sort(sort.Reverse(filtered))
+
+	// select other routes from the candidates whose expression also matches the path remainder
+	matchingRoutes := []Route{filtered.candidates[0].route}
+	for c := 1; c < len(filtered.candidates); c++ {
+		each := filtered.candidates[c]
+		if each.route.pathExpr.Matcher.MatchString(pathRemainder) {
+			matchingRoutes = append(matchingRoutes, each.route)
+		}
+	}
+	return matchingRoutes
+}
+
+// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 (step 1)
+func (r RouterJSR311) detectDispatcher(requestPath string, dispatchers []*WebService) (*WebService, string, error) {
+	filtered := &sortableDispatcherCandidates{}
+	for _, each := range dispatchers {
+		matches := each.pathExpr.Matcher.FindStringSubmatch(requestPath)
+		if matches != nil {
+			filtered.candidates = append(filtered.candidates,
+				dispatcherCandidate{each, matches[len(matches)-1], len(matches), each.pathExpr.LiteralCount, each.pathExpr.VarCount})
+		}
+	}
+	if len(filtered.candidates) == 0 {
+		if trace {
+			traceLogger.Printf("no WebService was found to match URL path:%s\n", requestPath)
+		}
+		return nil, "", errors.New("not found")
+	}
+	sort.Sort(sort.Reverse(filtered))
+	return filtered.candidates[0].dispatcher, filtered.candidates[0].finalMatch, nil
+}
+
+// Types and functions to support the sorting of Routes
+
+type routeCandidate struct {
+	route           Route
+	matchesCount    int // the number of capturing groups
+	literalCount    int // the number of literal characters (means those not resulting from template variable substitution)
+	nonDefaultCount int // the number of capturing groups with non-default regular expressions (i.e. not '([^/]+?)')
+}
+
+func (r routeCandidate) expressionToMatch() string {
+	return r.route.pathExpr.Source
+}
+
+func (r routeCandidate) String() string {
+	return fmt.Sprintf("(m=%d,l=%d,n=%d)", r.matchesCount, r.literalCount, r.nonDefaultCount)
+}
+
+type sortableRouteCandidates struct {
+	candidates []routeCandidate
+}
+
+func (rcs *sortableRouteCandidates) Len() int {
+	return len(rcs.candidates)
+}
+func (rcs *sortableRouteCandidates) Swap(i, j int) {
+	rcs.candidates[i], rcs.candidates[j] = rcs.candidates[j], rcs.candidates[i]
+}
+func (rcs *sortableRouteCandidates) Less(i, j int) bool {
+	ci := rcs.candidates[i]
+	cj := rcs.candidates[j]
+	// primary key
+	if ci.literalCount < cj.literalCount {
+		return true
+	}
+	if ci.literalCount > cj.literalCount {
+		return false
+	}
+	// secondary key
+	if ci.matchesCount < cj.matchesCount {
+		return true
+	}
+	if ci.matchesCount > cj.matchesCount {
+		return false
+	}
+	// tertiary key
+	if ci.nonDefaultCount < cj.nonDefaultCount {
+		return true
+	}
+	if ci.nonDefaultCount > cj.nonDefaultCount {
+		return false
+	}
+	// quaternary key ("source" is interpreted as Path)
+	return ci.route.Path < cj.route.Path
+}
+
+// Types and functions to support the sorting of Dispatchers
+
+type dispatcherCandidate struct {
+	dispatcher      *WebService
+	finalMatch      string
+	matchesCount    int // the number of capturing groups
+	literalCount    int // the number of literal characters (means those not resulting from template variable substitution)
+	nonDefaultCount int // the number of capturing groups with non-default regular expressions (i.e. not '([^/]+?)')
+}
+type sortableDispatcherCandidates struct {
+	candidates []dispatcherCandidate
+}
+
+func (dc *sortableDispatcherCandidates) Len() int {
+	return len(dc.candidates)
+}
+func (dc *sortableDispatcherCandidates) Swap(i, j int) {
+	dc.candidates[i], dc.candidates[j] = dc.candidates[j], dc.candidates[i]
+}
+func (dc *sortableDispatcherCandidates) Less(i, j int) bool {
+	ci := dc.candidates[i]
+	cj := dc.candidates[j]
+	// primary key
+	if ci.matchesCount < cj.matchesCount {
+		return true
+	}
+	if ci.matchesCount > cj.matchesCount {
+		return false
+	}
+	// secondary key
+	if ci.literalCount < cj.literalCount {
+		return true
+	}
+	if ci.literalCount > cj.literalCount {
+		return false
+	}
+	// tertiary key
+	return ci.nonDefaultCount < cj.nonDefaultCount
+}
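RouterJSR311 is the spec-driven alternative to the library's default route selector. A hedged wiring sketch; `NewContainer` and the `Container.Router` setter are assumed from the wider library surface and are not part of this file.

```go
package main

import restful "github.com/emicklei/go-restful"

func main() {
	// Ask a dedicated container to use the JSR311-style matching defined above.
	c := restful.NewContainer()
	c.Router(restful.RouterJSR311{})
}
```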
diff --git a/vendor/github.com/emicklei/go-restful/log/log.go b/vendor/github.com/emicklei/go-restful/log/log.go
new file mode 100644
index 00000000..6cd44c7a
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/log/log.go
@@ -0,0 +1,34 @@
+package log
+
+import (
+	stdlog "log"
+	"os"
+)
+
+// StdLogger corresponds to a minimal subset of the interface satisfied by stdlib log.Logger
+type StdLogger interface {
+	Print(v ...interface{})
+	Printf(format string, v ...interface{})
+}
+
+var Logger StdLogger
+
+func init() {
+	// default Logger
+	SetLogger(stdlog.New(os.Stderr, "[restful] ", stdlog.LstdFlags|stdlog.Lshortfile))
+}
+
+// SetLogger sets the logger for this package
+func SetLogger(customLogger StdLogger) {
+	Logger = customLogger
+}
+
+// Print delegates to the Logger
+func Print(v ...interface{}) {
+	Logger.Print(v...)
+}
+
+// Printf delegates to the Logger
+func Printf(format string, v ...interface{}) {
+	Logger.Printf(format, v...)
+}
diff --git a/vendor/github.com/emicklei/go-restful/logger.go b/vendor/github.com/emicklei/go-restful/logger.go
new file mode 100644
index 00000000..6595df00
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/logger.go
@@ -0,0 +1,32 @@
+package restful
+
+// Copyright 2014 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+import (
+	"github.com/emicklei/go-restful/log"
+)
+
+var trace bool = false
+var traceLogger log.StdLogger
+
+func init() {
+	traceLogger = log.Logger // use the package logger by default
+}
+
+// TraceLogger enables detailed logging of HTTP request matching and filter invocation using the given logger.
+// Passing a non-nil logger enables tracing; you may also call EnableTracing() directly to toggle trace logging on the package-wide logger.
+func TraceLogger(logger log.StdLogger) {
+	traceLogger = logger
+	EnableTracing(logger != nil)
+}
+
+// SetLogger exposes the setter for the global logger on the top-level package
+func SetLogger(customLogger log.StdLogger) {
+	log.SetLogger(customLogger)
+}
+
+// EnableTracing can be used to turn trace logging on and off.
+func EnableTracing(enabled bool) {
+	trace = enabled
+}
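TraceLogger and EnableTracing above control the package's diagnostic output. A minimal usage sketch, assuming only the standard library logger:

```go
package main

import (
	stdlog "log"
	"os"

	restful "github.com/emicklei/go-restful"
)

func main() {
	// Route detailed request-matching output to a dedicated logger;
	// TraceLogger with a non-nil logger also enables tracing.
	restful.TraceLogger(stdlog.New(os.Stdout, "[restful-trace] ", stdlog.LstdFlags))

	// Tracing can later be toggled without replacing the logger.
	restful.EnableTracing(false)
}
```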
diff --git a/vendor/github.com/emicklei/go-restful/mime.go b/vendor/github.com/emicklei/go-restful/mime.go
new file mode 100644
index 00000000..33014471
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/mime.go
@@ -0,0 +1,50 @@
+package restful
+
+import (
+	"strconv"
+	"strings"
+)
+
+type mime struct {
+	media   string
+	quality float64
+}
+
+// insertMime adds a mime to a list and keeps it sorted by quality.
+func insertMime(l []mime, e mime) []mime {
+	for i, each := range l {
+		// if the existing entry has a lower quality than the new mime, insert the new one before it
+		if e.quality > each.quality {
+			left := append([]mime{}, l[0:i]...)
+			return append(append(left, e), l[i:]...)
+		}
+	}
+	return append(l, e)
+}
+
+const qFactorWeightingKey = "q"
+
+// sortedMimes returns a list of mimes sorted (descending) by their specified quality.
+// e.g. text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3
+func sortedMimes(accept string) (sorted []mime) {
+	for _, each := range strings.Split(accept, ",") {
+		typeAndQuality := strings.Split(strings.Trim(each, " "), ";")
+		if len(typeAndQuality) == 1 {
+			sorted = insertMime(sorted, mime{typeAndQuality[0], 1.0})
+		} else {
+			// take factor
+			qAndWeight := strings.Split(typeAndQuality[1], "=")
+			if len(qAndWeight) == 2 && strings.Trim(qAndWeight[0], " ") == qFactorWeightingKey {
+				f, err := strconv.ParseFloat(qAndWeight[1], 64)
+				if err != nil {
+					traceLogger.Printf("unable to parse quality in %s, %v", each, err)
+				} else {
+					sorted = insertMime(sorted, mime{typeAndQuality[0], f})
+				}
+			} else {
+				sorted = insertMime(sorted, mime{typeAndQuality[0], 1.0})
+			}
+		}
+	}
+	return
+}
diff --git a/vendor/github.com/emicklei/go-restful/options_filter.go b/vendor/github.com/emicklei/go-restful/options_filter.go
new file mode 100644
index 00000000..5c1b3425
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/options_filter.go
@@ -0,0 +1,34 @@
+package restful
+
+import "strings"
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+// OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method
+// and provides the response with a set of allowed methods for the request URL Path.
+// As for any filter, you can also install it for a particular WebService within a Container.
+// Note: this filter is not needed when using CrossOriginResourceSharing (for CORS).
+func (c *Container) OPTIONSFilter(req *Request, resp *Response, chain *FilterChain) {
+	if "OPTIONS" != req.Request.Method {
+		chain.ProcessFilter(req, resp)
+		return
+	}
+
+	archs := req.Request.Header.Get(HEADER_AccessControlRequestHeaders)
+	methods := strings.Join(c.computeAllowedMethods(req), ",")
+	origin := req.Request.Header.Get(HEADER_Origin)
+
+	resp.AddHeader(HEADER_Allow, methods)
+	resp.AddHeader(HEADER_AccessControlAllowOrigin, origin)
+	resp.AddHeader(HEADER_AccessControlAllowHeaders, archs)
+	resp.AddHeader(HEADER_AccessControlAllowMethods, methods)
+}
+
+// OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method
+// and provides the response with a set of allowed methods for the request URL Path.
+// Note: this filter is not needed when using CrossOriginResourceSharing (for CORS).
+func OPTIONSFilter() FilterFunction {
+	return DefaultContainer.OPTIONSFilter
+}
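The OPTIONS filter can be installed either package-wide or on a dedicated container. A hedged sketch; `restful.Filter`, `NewContainer` and `Container.Filter` are assumed from the wider library surface, only the OPTIONSFilter variants themselves are defined above.

```go
package main

import restful "github.com/emicklei/go-restful"

func main() {
	// Package-level variant on the default container.
	restful.Filter(restful.OPTIONSFilter())

	// Method variant on a dedicated container.
	c := restful.NewContainer()
	c.Filter(c.OPTIONSFilter)
}
```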
diff --git a/vendor/github.com/emicklei/go-restful/parameter.go b/vendor/github.com/emicklei/go-restful/parameter.go
new file mode 100644
index 00000000..e8793304
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/parameter.go
@@ -0,0 +1,143 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+const (
+	// PathParameterKind = indicator of Request parameter type "path"
+	PathParameterKind = iota
+
+	// QueryParameterKind = indicator of Request parameter type "query"
+	QueryParameterKind
+
+	// BodyParameterKind = indicator of Request parameter type "body"
+	BodyParameterKind
+
+	// HeaderParameterKind = indicator of Request parameter type "header"
+	HeaderParameterKind
+
+	// FormParameterKind = indicator of Request parameter type "form"
+	FormParameterKind
+
+	// CollectionFormatCSV comma separated values `foo,bar`
+	CollectionFormatCSV = CollectionFormat("csv")
+
+	// CollectionFormatSSV space separated values `foo bar`
+	CollectionFormatSSV = CollectionFormat("ssv")
+
+	// CollectionFormatTSV tab separated values `foo\tbar`
+	CollectionFormatTSV = CollectionFormat("tsv")
+
+	// CollectionFormatPipes pipe separated values `foo|bar`
+	CollectionFormatPipes = CollectionFormat("pipes")
+
+	// CollectionFormatMulti corresponds to multiple parameter instances instead of multiple values for a single
+	// instance `foo=bar&foo=baz`. This is valid only for QueryParameters and FormParameters
+	CollectionFormatMulti = CollectionFormat("multi")
+)
+
+type CollectionFormat string
+
+func (cf CollectionFormat) String() string {
+	return string(cf)
+}
+
+// Parameter is for documenting the parameter used in an HTTP Request
+// ParameterData kinds are Path, Query and Body
+type Parameter struct {
+	data *ParameterData
+}
+
+// ParameterData represents the state of a Parameter.
+// It is made public to make it accessible to e.g. the Swagger package.
+type ParameterData struct {
+	Name, Description, DataType, DataFormat string
+	Kind                                    int
+	Required                                bool
+	AllowableValues                         map[string]string
+	AllowMultiple                           bool
+	DefaultValue                            string
+	CollectionFormat                        string
+}
+
+// Data returns the state of the Parameter
+func (p *Parameter) Data() ParameterData {
+	return *p.data
+}
+
+// Kind returns the parameter type indicator (see const for valid values)
+func (p *Parameter) Kind() int {
+	return p.data.Kind
+}
+
+func (p *Parameter) bePath() *Parameter {
+	p.data.Kind = PathParameterKind
+	return p
+}
+func (p *Parameter) beQuery() *Parameter {
+	p.data.Kind = QueryParameterKind
+	return p
+}
+func (p *Parameter) beBody() *Parameter {
+	p.data.Kind = BodyParameterKind
+	return p
+}
+
+func (p *Parameter) beHeader() *Parameter {
+	p.data.Kind = HeaderParameterKind
+	return p
+}
+
+func (p *Parameter) beForm() *Parameter {
+	p.data.Kind = FormParameterKind
+	return p
+}
+
+// Required sets the required field and returns the receiver
+func (p *Parameter) Required(required bool) *Parameter {
+	p.data.Required = required
+	return p
+}
+
+// AllowMultiple sets the allowMultiple field and returns the receiver
+func (p *Parameter) AllowMultiple(multiple bool) *Parameter {
+	p.data.AllowMultiple = multiple
+	return p
+}
+
+// AllowableValues sets the allowableValues field and returns the receiver
+func (p *Parameter) AllowableValues(values map[string]string) *Parameter {
+	p.data.AllowableValues = values
+	return p
+}
+
+// DataType sets the dataType field and returns the receiver
+func (p *Parameter) DataType(typeName string) *Parameter {
+	p.data.DataType = typeName
+	return p
+}
+
+// DataFormat sets the dataFormat field for Swagger UI
+func (p *Parameter) DataFormat(formatName string) *Parameter {
+	p.data.DataFormat = formatName
+	return p
+}
+
+// DefaultValue sets the default value field and returns the receiver
+func (p *Parameter) DefaultValue(stringRepresentation string) *Parameter {
+	p.data.DefaultValue = stringRepresentation
+	return p
+}
+
+// Description sets the description value field and returns the receiver
+func (p *Parameter) Description(doc string) *Parameter {
+	p.data.Description = doc
+	return p
+}
+
+// CollectionFormat sets the collection format for an array type
+func (p *Parameter) CollectionFormat(format CollectionFormat) *Parameter {
+	p.data.CollectionFormat = format.String()
+	return p
+}
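The fluent setters above are typically chained when documenting a route. A hedged sketch follows; `WebService`, `GET`, `Route`, `QueryParameter`, `Add` and the handler are assumed from the wider library surface and application code, while `Param`, `DataType`, `DefaultValue` and `Required` are the setters defined in this file.

```go
package main

import (
	"log"
	"net/http"

	restful "github.com/emicklei/go-restful"
)

func listUsers(req *restful.Request, resp *restful.Response) {
	limit := req.QueryParameter("limit")
	resp.WriteHeaderAndJson(http.StatusOK, map[string]string{"limit": limit}, restful.MIME_JSON)
}

func main() {
	ws := new(restful.WebService)
	ws.Route(ws.GET("/users").
		Param(ws.QueryParameter("limit", "maximum number of results").
			DataType("integer").
			DefaultValue("25").
			Required(false)).
		To(listUsers))
	restful.Add(ws)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```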
diff --git a/vendor/github.com/emicklei/go-restful/path_expression.go b/vendor/github.com/emicklei/go-restful/path_expression.go
new file mode 100644
index 00000000..95a9a254
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/path_expression.go
@@ -0,0 +1,74 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+	"bytes"
+	"fmt"
+	"regexp"
+	"strings"
+)
+
+// PathExpression holds a compiled path expression (RegExp) needed to match against
+// Http request paths and to extract path parameter values.
+type pathExpression struct {
+	LiteralCount int      // the number of literal characters (means those not resulting from template variable substitution)
+	VarNames     []string // the names of parameters (enclosed by {}) in the path
+	VarCount     int      // the number of named parameters (enclosed by {}) in the path
+	Matcher      *regexp.Regexp
+	Source       string // Path as defined by the RouteBuilder
+	tokens       []string
+}
+
+// NewPathExpression creates a PathExpression from the input URL path.
+// Returns an error if the path is invalid.
+func newPathExpression(path string) (*pathExpression, error) {
+	expression, literalCount, varNames, varCount, tokens := templateToRegularExpression(path)
+	compiled, err := regexp.Compile(expression)
+	if err != nil {
+		return nil, err
+	}
+	return &pathExpression{literalCount, varNames, varCount, compiled, expression, tokens}, nil
+}
+
+// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-370003.7.3
+func templateToRegularExpression(template string) (expression string, literalCount int, varNames []string, varCount int, tokens []string) {
+	var buffer bytes.Buffer
+	buffer.WriteString("^")
+	//tokens = strings.Split(template, "/")
+	tokens = tokenizePath(template)
+	for _, each := range tokens {
+		if each == "" {
+			continue
+		}
+		buffer.WriteString("/")
+		if strings.HasPrefix(each, "{") {
+			// check for regular expression in variable
+			colon := strings.Index(each, ":")
+			var varName string
+			if colon != -1 {
+				// extract expression
+				varName = strings.TrimSpace(each[1:colon])
+				paramExpr := strings.TrimSpace(each[colon+1 : len(each)-1])
+				if paramExpr == "*" { // special case
+					buffer.WriteString("(.*)")
+				} else {
+					buffer.WriteString(fmt.Sprintf("(%s)", paramExpr)) // between colon and closing moustache
+				}
+			} else {
+				// plain var
+				varName = strings.TrimSpace(each[1 : len(each)-1])
+				buffer.WriteString("([^/]+?)")
+			}
+			varNames = append(varNames, varName)
+			varCount += 1
+		} else {
+			literalCount += len(each)
+			encoded := each // TODO URI encode
+			buffer.WriteString(regexp.QuoteMeta(encoded))
+		}
+	}
+	return strings.TrimRight(buffer.String(), "/") + "(/.*)?$", literalCount, varNames, varCount, tokens
+}
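templateToRegularExpression turns each path token into either a quoted literal or a capture group, then anchors the result with an optional trailing remainder group. A small hand-derived example (the expected pattern below was worked out from the rules above, not emitted by the unexported function itself):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// For the template "/users/{id:[0-9]+}": "users" is quoted as a literal,
	// {id:[0-9]+} becomes its own capture group, and "(/.*)?$" is appended.
	expr := regexp.MustCompile(`^/users/([0-9]+)(/.*)?$`)
	fmt.Println(expr.FindStringSubmatch("/users/42/files")) // [/users/42/files 42 /files]
}
```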
diff --git a/vendor/github.com/emicklei/go-restful/path_processor.go b/vendor/github.com/emicklei/go-restful/path_processor.go
new file mode 100644
index 00000000..357c723a
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/path_processor.go
@@ -0,0 +1,63 @@
+package restful
+
+import (
+	"bytes"
+	"strings"
+)
+
+// Copyright 2018 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+// PathProcessor is extra behaviour that a Router can provide to extract path parameters from the path.
+// If a Router does not implement this interface then the default behaviour will be used.
+type PathProcessor interface {
+	// ExtractParameters gets the path parameters defined in the route and webService from the urlPath
+	ExtractParameters(route *Route, webService *WebService, urlPath string) map[string]string
+}
+
+type defaultPathProcessor struct{}
+
+// Extract the parameters from the request url path
+func (d defaultPathProcessor) ExtractParameters(r *Route, _ *WebService, urlPath string) map[string]string {
+	urlParts := tokenizePath(urlPath)
+	pathParameters := map[string]string{}
+	for i, key := range r.pathParts {
+		var value string
+		if i >= len(urlParts) {
+			value = ""
+		} else {
+			value = urlParts[i]
+		}
+		if strings.HasPrefix(key, "{") { // path-parameter
+			if colon := strings.Index(key, ":"); colon != -1 {
+				// extract by regex
+				regPart := key[colon+1 : len(key)-1]
+				keyPart := key[1:colon]
+				if regPart == "*" {
+					pathParameters[keyPart] = untokenizePath(i, urlParts)
+					break
+				} else {
+					pathParameters[keyPart] = value
+				}
+			} else {
+				// without enclosing {}
+				pathParameters[key[1:len(key)-1]] = value
+			}
+		}
+	}
+	return pathParameters
+}
+
+// Untokenize back into a URL path using the slash separator
+func untokenizePath(offset int, parts []string) string {
+	var buffer bytes.Buffer
+	for p := offset; p < len(parts); p++ {
+		buffer.WriteString(parts[p])
+		// do not end
+		if p < len(parts)-1 {
+			buffer.WriteString("/")
+		}
+	}
+	return buffer.String()
+}
diff --git a/vendor/github.com/emicklei/go-restful/request.go b/vendor/github.com/emicklei/go-restful/request.go
new file mode 100644
index 00000000..a20730fe
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/request.go
@@ -0,0 +1,118 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+	"compress/zlib"
+	"net/http"
+)
+
+var defaultRequestContentType string
+
+// Request is a wrapper for a http Request that provides convenience methods
+type Request struct {
+	Request           *http.Request
+	pathParameters    map[string]string
+	attributes        map[string]interface{} // for storing request-scoped values
+	selectedRoutePath string                 // root path + route path that matched the request, e.g. /meetings/{id}/attendees
+}
+
+func NewRequest(httpRequest *http.Request) *Request {
+	return &Request{
+		Request:        httpRequest,
+		pathParameters: map[string]string{},
+		attributes:     map[string]interface{}{},
+	} // empty parameters, attributes
+}
+
+// If ContentType is missing or */* is given then fall back to this type, otherwise
+// a "Unable to unmarshal content of type:" response is returned.
+// Valid values are restful.MIME_JSON and restful.MIME_XML
+// Example:
+// 	restful.DefaultRequestContentType(restful.MIME_JSON)
+func DefaultRequestContentType(mime string) {
+	defaultRequestContentType = mime
+}
+
+// PathParameter accesses the Path parameter value by its name
+func (r *Request) PathParameter(name string) string {
+	return r.pathParameters[name]
+}
+
+// PathParameters accesses the Path parameter values
+func (r *Request) PathParameters() map[string]string {
+	return r.pathParameters
+}
+
+// QueryParameter returns the (first) Query parameter value by its name
+func (r *Request) QueryParameter(name string) string {
+	return r.Request.FormValue(name)
+}
+
+// QueryParameters returns all the query parameter values for the given name
+func (r *Request) QueryParameters(name string) []string {
+	return r.Request.URL.Query()[name]
+}
+
+// BodyParameter parses the body of the request (typically once, for a POST or a PUT) and returns the value of the given name or an error.
+func (r *Request) BodyParameter(name string) (string, error) {
+	err := r.Request.ParseForm()
+	if err != nil {
+		return "", err
+	}
+	return r.Request.PostFormValue(name), nil
+}
+
+// HeaderParameter returns the HTTP Header value of a Header name or empty if missing
+func (r *Request) HeaderParameter(name string) string {
+	return r.Request.Header.Get(name)
+}
+
+// ReadEntity checks the Content-Type and Content-Encoding headers and reads the content into the entityPointer.
+func (r *Request) ReadEntity(entityPointer interface{}) (err error) {
+	contentType := r.Request.Header.Get(HEADER_ContentType)
+	contentEncoding := r.Request.Header.Get(HEADER_ContentEncoding)
+
+	// check if the request body needs decompression
+	if ENCODING_GZIP == contentEncoding {
+		gzipReader := currentCompressorProvider.AcquireGzipReader()
+		defer currentCompressorProvider.ReleaseGzipReader(gzipReader)
+		gzipReader.Reset(r.Request.Body)
+		r.Request.Body = gzipReader
+	} else if ENCODING_DEFLATE == contentEncoding {
+		zlibReader, err := zlib.NewReader(r.Request.Body)
+		if err != nil {
+			return err
+		}
+		r.Request.Body = zlibReader
+	}
+
+	// lookup the EntityReader, use defaultRequestContentType if needed and provided
+	entityReader, ok := entityAccessRegistry.accessorAt(contentType)
+	if !ok {
+		if len(defaultRequestContentType) != 0 {
+			entityReader, ok = entityAccessRegistry.accessorAt(defaultRequestContentType)
+		}
+		if !ok {
+			return NewError(http.StatusBadRequest, "Unable to unmarshal content of type:"+contentType)
+		}
+	}
+	return entityReader.Read(r, entityPointer)
+}
+
+// SetAttribute adds or replaces the attribute with the given value.
+func (r *Request) SetAttribute(name string, value interface{}) {
+	r.attributes[name] = value
+}
+
+// Attribute returns the value associated to the given name. Returns nil if absent.
+func (r Request) Attribute(name string) interface{} {
+	return r.attributes[name]
+}
+
+// SelectedRoutePath returns the root path + route path that matched the request, e.g. /meetings/{id}/attendees
+func (r Request) SelectedRoutePath() string {
+	return r.selectedRoutePath
+}
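ReadEntity picks the registered accessor based on the Content-Type (and falls back to the configured default), so a handler only deals with its own type. A hedged handler sketch; the `user` type, `POST` and `WebService` wiring are assumptions, while `ReadEntity`, `WriteError` and `WriteHeaderAndEntity` come from this patch.

```go
package main

import (
	"log"
	"net/http"

	restful "github.com/emicklei/go-restful"
)

type user struct {
	Name string `json:"name"`
}

// createUser reads the request payload (JSON or XML, depending on Content-Type)
// into a user value and echoes it back with a 201 status.
func createUser(req *restful.Request, resp *restful.Response) {
	u := new(user)
	if err := req.ReadEntity(u); err != nil {
		resp.WriteError(http.StatusBadRequest, err)
		return
	}
	resp.WriteHeaderAndEntity(http.StatusCreated, u)
}

func main() {
	ws := new(restful.WebService) // WebService/POST wiring assumed from the wider library
	ws.Route(ws.POST("/users").To(createUser))
	restful.Add(ws)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```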
diff --git a/vendor/github.com/emicklei/go-restful/response.go b/vendor/github.com/emicklei/go-restful/response.go
new file mode 100644
index 00000000..fbb48f2d
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/response.go
@@ -0,0 +1,255 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+	"bufio"
+	"errors"
+	"net"
+	"net/http"
+)
+
+// DefaultResponseMimeType is DEPRECATED, use DefaultResponseContentType(mime)
+var DefaultResponseMimeType string
+
+// PrettyPrintResponses controls the indentation feature of XML and JSON serialization
+var PrettyPrintResponses = true
+
+// Response is a wrapper on the actual http ResponseWriter
+// It provides several convenience methods to prepare and write response content.
+type Response struct {
+	http.ResponseWriter
+	requestAccept string        // mime-type(s) that the HTTP request says it wants to receive
+	routeProduces []string      // mime-types that the Route says it can produce
+	statusCode    int           // HTTP status code that has been written explicitly (if zero then net/http has written 200)
+	contentLength int           // number of bytes written for the response body
+	prettyPrint   bool          // controls the indentation feature of XML and JSON serialization. It is initialized using var PrettyPrintResponses.
+	err           error         // err property is kept when WriteError is called
+	hijacker      http.Hijacker // if underlying ResponseWriter supports it
+}
+
+// NewResponse creates a new response based on a http ResponseWriter.
+func NewResponse(httpWriter http.ResponseWriter) *Response {
+	hijacker, _ := httpWriter.(http.Hijacker)
+	return &Response{ResponseWriter: httpWriter, routeProduces: []string{}, statusCode: http.StatusOK, prettyPrint: PrettyPrintResponses, hijacker: hijacker}
+}
+
+// DefaultResponseContentType sets a default.
+// If Accept header matching fails, fall back to this type.
+// Valid values are restful.MIME_JSON and restful.MIME_XML
+// Example:
+// 	restful.DefaultResponseContentType(restful.MIME_JSON)
+func DefaultResponseContentType(mime string) {
+	DefaultResponseMimeType = mime
+}
+
+// InternalServerError writes the StatusInternalServerError header.
+// DEPRECATED, use WriteErrorString(http.StatusInternalServerError,reason)
+func (r Response) InternalServerError() Response {
+	r.WriteHeader(http.StatusInternalServerError)
+	return r
+}
+
+// Hijack implements the http.Hijacker interface.  This expands
+// the Response to fulfill http.Hijacker if the underlying
+// http.ResponseWriter supports it.
+func (r *Response) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+	if r.hijacker == nil {
+		return nil, nil, errors.New("http.Hijacker not implemented by underlying http.ResponseWriter")
+	}
+	return r.hijacker.Hijack()
+}
+
+// PrettyPrint changes whether this response must produce pretty (line-by-line, indented) JSON or XML output.
+func (r *Response) PrettyPrint(bePretty bool) {
+	r.prettyPrint = bePretty
+}
+
+// AddHeader is a shortcut for .Header().Add(header,value)
+func (r Response) AddHeader(header string, value string) Response {
+	r.Header().Add(header, value)
+	return r
+}
+
+// SetRequestAccepts tells the response what Mime-type(s) the HTTP request said it wants to accept. Exposed for testing.
+func (r *Response) SetRequestAccepts(mime string) {
+	r.requestAccept = mime
+}
+
+// EntityWriter returns the registered EntityWriter that the entity (requested resource)
+// can write according to what the request wants (Accept) and what the Route can produce or what the restful defaults say.
+// If called before WriteEntity and WriteHeader then a false return value can be used to write a 406: Not Acceptable.
+func (r *Response) EntityWriter() (EntityReaderWriter, bool) {
+	sorted := sortedMimes(r.requestAccept)
+	for _, eachAccept := range sorted {
+		for _, eachProduce := range r.routeProduces {
+			if eachProduce == eachAccept.media {
+				if w, ok := entityAccessRegistry.accessorAt(eachAccept.media); ok {
+					return w, true
+				}
+			}
+		}
+		if eachAccept.media == "*/*" {
+			for _, each := range r.routeProduces {
+				if w, ok := entityAccessRegistry.accessorAt(each); ok {
+					return w, true
+				}
+			}
+		}
+	}
+	// if requestAccept is empty
+	writer, ok := entityAccessRegistry.accessorAt(r.requestAccept)
+	if !ok {
+		// if not registered then fallback to the defaults (if set)
+		if DefaultResponseMimeType == MIME_JSON {
+			return entityAccessRegistry.accessorAt(MIME_JSON)
+		}
+		if DefaultResponseMimeType == MIME_XML {
+			return entityAccessRegistry.accessorAt(MIME_XML)
+		}
+		// Fallback to whatever the route says it can produce.
+		// https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+		for _, each := range r.routeProduces {
+			if w, ok := entityAccessRegistry.accessorAt(each); ok {
+				return w, true
+			}
+		}
+		if trace {
+			traceLogger.Printf("no registered EntityReaderWriter found for %s", r.requestAccept)
+		}
+	}
+	return writer, ok
+}
+
+// WriteEntity calls WriteHeaderAndEntity with Http Status OK (200)
+func (r *Response) WriteEntity(value interface{}) error {
+	return r.WriteHeaderAndEntity(http.StatusOK, value)
+}
+
+// WriteHeaderAndEntity marshals the value using the representation denoted by the Accept Header and the registered EntityWriters.
+// If no Accept header is specified (or */*) then respond with the Content-Type as specified by the first in the Route.Produces.
+// If an Accept header is specified then respond with the Content-Type as specified by the first in the Route.Produces that is matched with the Accept header.
+// If the value is nil then no response is sent except for the Http status. You may want to call WriteHeader(http.StatusNotFound) instead.
+// If there is no writer available that can represent the value in the requested MIME type then Http Status NotAcceptable is written.
+// Current implementation ignores any q-parameters in the Accept Header.
+// Returns an error if the value could not be written on the response.
+func (r *Response) WriteHeaderAndEntity(status int, value interface{}) error {
+	writer, ok := r.EntityWriter()
+	if !ok {
+		r.WriteHeader(http.StatusNotAcceptable)
+		return nil
+	}
+	return writer.Write(r, status, value)
+}
+
+// WriteAsXml is a convenience method for writing a value in xml (requires Xml tags on the value)
+// It uses the standard encoding/xml package for marshalling the value ; not using a registered EntityReaderWriter.
+func (r *Response) WriteAsXml(value interface{}) error {
+	return writeXML(r, http.StatusOK, MIME_XML, value)
+}
+
+// WriteHeaderAndXml is a convenience method for writing a status and value in xml (requires Xml tags on the value)
+// It uses the standard encoding/xml package for marshalling the value ; not using a registered EntityReaderWriter.
+func (r *Response) WriteHeaderAndXml(status int, value interface{}) error {
+	return writeXML(r, status, MIME_XML, value)
+}
+
+// WriteAsJson is a convenience method for writing a value in json.
+// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter.
+func (r *Response) WriteAsJson(value interface{}) error {
+	return writeJSON(r, http.StatusOK, MIME_JSON, value)
+}
+
+// WriteJson is a convenience method for writing a value in Json with a given Content-Type.
+// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter.
+func (r *Response) WriteJson(value interface{}, contentType string) error {
+	return writeJSON(r, http.StatusOK, contentType, value)
+}
+
+// WriteHeaderAndJson is a convenience method for writing the status and a value in Json with a given Content-Type.
+// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter.
+func (r *Response) WriteHeaderAndJson(status int, value interface{}, contentType string) error {
+	return writeJSON(r, status, contentType, value)
+}
+
+// WriteError writes the http status and the error string on the response. err can be nil.
+func (r *Response) WriteError(httpStatus int, err error) error {
+	r.err = err
+	if err == nil {
+		r.WriteErrorString(httpStatus, "")
+	} else {
+		r.WriteErrorString(httpStatus, err.Error())
+	}
+	return err
+}
+
+// WriteServiceError is a convenience method for responding with a status and a ServiceError
+func (r *Response) WriteServiceError(httpStatus int, err ServiceError) error {
+	r.err = err
+	return r.WriteHeaderAndEntity(httpStatus, err)
+}
+
+// WriteErrorString is a convenience method for writing an error status with the given reason string
+func (r *Response) WriteErrorString(httpStatus int, errorReason string) error {
+	if r.err == nil {
+		// if not called from WriteError
+		r.err = errors.New(errorReason)
+	}
+	r.WriteHeader(httpStatus)
+	if _, err := r.Write([]byte(errorReason)); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Flush implements http.Flusher interface, which sends any buffered data to the client.
+func (r *Response) Flush() {
+	if f, ok := r.ResponseWriter.(http.Flusher); ok {
+		f.Flush()
+	} else if trace {
+		traceLogger.Printf("ResponseWriter %v doesn't support Flush", r)
+	}
+}
+
+// WriteHeader is overridden to remember the Status Code that has been written.
+// Changes to the Header of the response have no effect after this.
+func (r *Response) WriteHeader(httpStatus int) {
+	r.statusCode = httpStatus
+	r.ResponseWriter.WriteHeader(httpStatus)
+}
+
+// StatusCode returns the code that has been written using WriteHeader.
+func (r Response) StatusCode() int {
+	if 0 == r.statusCode {
+		// no status code has been written yet; assume OK
+		return http.StatusOK
+	}
+	return r.statusCode
+}
+
+// Write writes the data to the connection as part of an HTTP reply.
+// Write is part of http.ResponseWriter interface.
+func (r *Response) Write(bytes []byte) (int, error) {
+	written, err := r.ResponseWriter.Write(bytes)
+	r.contentLength += written
+	return written, err
+}
+
+// ContentLength returns the number of bytes written for the response content.
+// Note that this value is only correct if all data is written through the Response using its Write* methods.
+// Data written directly using the underlying http.ResponseWriter is not accounted for.
+func (r Response) ContentLength() int {
+	return r.contentLength
+}
+
+// CloseNotify is part of http.CloseNotifier interface
+func (r Response) CloseNotify() <-chan bool {
+	return r.ResponseWriter.(http.CloseNotifier).CloseNotify()
+}
+
+// Error returns the err created by WriteError
+func (r Response) Error() error {
+	return r.err
+}
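WriteEntity, WriteError, WriteServiceError and WriteErrorString cover the common success and error paths. A hedged handler fragment; the `user` type and lookup map are stand-ins for application code, and `ServiceError`/`NewError` are assumed from the wider library surface (they live outside this file).

```go
package handlers

import (
	"net/http"

	restful "github.com/emicklei/go-restful"
)

type user struct{ ID, Name string }

var users = map[string]user{"42": {ID: "42", Name: "Ada"}}

// getUser writes the entity negotiated from the Accept header, or a
// structured error when the resource is missing.
func getUser(req *restful.Request, resp *restful.Response) {
	u, found := users[req.PathParameter("id")]
	if !found {
		resp.WriteServiceError(http.StatusNotFound,
			restful.NewError(http.StatusNotFound, "user not found"))
		return
	}
	resp.WriteEntity(u)
}
```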
diff --git a/vendor/github.com/emicklei/go-restful/route.go b/vendor/github.com/emicklei/go-restful/route.go
new file mode 100644
index 00000000..6d15dbf6
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/route.go
@@ -0,0 +1,170 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+	"net/http"
+	"strings"
+)
+
+// RouteFunction declares the signature of a function that can be bound to a Route.
+type RouteFunction func(*Request, *Response)
+
+// RouteSelectionConditionFunction declares the signature of a function that
+// can be used to add extra conditional logic when selecting whether the route
+// matches the HTTP request.
+type RouteSelectionConditionFunction func(httpRequest *http.Request) bool
+
+// Route binds an HTTP Method, Path and Consumes combination to a RouteFunction.
+type Route struct {
+	Method   string
+	Produces []string
+	Consumes []string
+	Path     string // webservice root path + described path
+	Function RouteFunction
+	Filters  []FilterFunction
+	If       []RouteSelectionConditionFunction
+
+	// cached values for dispatching
+	relativePath string
+	pathParts    []string
+	pathExpr     *pathExpression // cached compilation of relativePath as RegExp
+
+	// documentation
+	Doc                     string
+	Notes                   string
+	Operation               string
+	ParameterDocs           []*Parameter
+	ResponseErrors          map[int]ResponseError
+	DefaultResponse         *ResponseError
+	ReadSample, WriteSample interface{} // structs that model an example request or response payload
+
+	// Extra information used to store custom information about the route.
+	Metadata map[string]interface{}
+
+	// marks a route as deprecated
+	Deprecated bool
+
+	// Overrides the container.contentEncodingEnabled value
+	contentEncodingEnabled *bool
+}
+
+// Initialize for Route
+func (r *Route) postBuild() {
+	r.pathParts = tokenizePath(r.Path)
+}
+
+// Create Request and Response from their http versions
+func (r *Route) wrapRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request, pathParams map[string]string) (*Request, *Response) {
+	wrappedRequest := NewRequest(httpRequest)
+	wrappedRequest.pathParameters = pathParams
+	wrappedRequest.selectedRoutePath = r.Path
+	wrappedResponse := NewResponse(httpWriter)
+	wrappedResponse.requestAccept = httpRequest.Header.Get(HEADER_Accept)
+	wrappedResponse.routeProduces = r.Produces
+	return wrappedRequest, wrappedResponse
+}
+
+// dispatchWithFilters calls the function after passing through its own filters
+func (r *Route) dispatchWithFilters(wrappedRequest *Request, wrappedResponse *Response) {
+	if len(r.Filters) > 0 {
+		chain := FilterChain{Filters: r.Filters, Target: r.Function}
+		chain.ProcessFilter(wrappedRequest, wrappedResponse)
+	} else {
+		// unfiltered
+		r.Function(wrappedRequest, wrappedResponse)
+	}
+}
+
+func stringTrimSpaceCutset(r rune) bool {
+	return r == ' '
+}
+
+// Return whether the mimeType matches what this Route can produce.
+func (r Route) matchesAccept(mimeTypesWithQuality string) bool {
+	remaining := mimeTypesWithQuality
+	for {
+		var mimeType string
+		if end := strings.Index(remaining, ","); end == -1 {
+			mimeType, remaining = remaining, ""
+		} else {
+			mimeType, remaining = remaining[:end], remaining[end+1:]
+		}
+		if quality := strings.Index(mimeType, ";"); quality != -1 {
+			mimeType = mimeType[:quality]
+		}
+		mimeType = strings.TrimFunc(mimeType, stringTrimSpaceCutset)
+		if mimeType == "*/*" {
+			return true
+		}
+		for _, producibleType := range r.Produces {
+			if producibleType == "*/*" || producibleType == mimeType {
+				return true
+			}
+		}
+		if len(remaining) == 0 {
+			return false
+		}
+	}
+}
+
+// Return whether this Route can consume content with a type specified by mimeTypes (can be empty).
+func (r Route) matchesContentType(mimeTypes string) bool {
+
+	if len(r.Consumes) == 0 {
+		// did not specify what it can consume ;  any media type (“*/*”) is assumed
+		return true
+	}
+
+	if len(mimeTypes) == 0 {
+		// idempotent methods with (most-likely or guaranteed) empty content match missing Content-Type
+		m := r.Method
+		if m == "GET" || m == "HEAD" || m == "OPTIONS" || m == "DELETE" || m == "TRACE" {
+			return true
+		}
+		// proceed with default
+		mimeTypes = MIME_OCTET
+	}
+
+	remaining := mimeTypes
+	for {
+		var mimeType string
+		if end := strings.Index(remaining, ","); end == -1 {
+			mimeType, remaining = remaining, ""
+		} else {
+			mimeType, remaining = remaining[:end], remaining[end+1:]
+		}
+		if quality := strings.Index(mimeType, ";"); quality != -1 {
+			mimeType = mimeType[:quality]
+		}
+		mimeType = strings.TrimFunc(mimeType, stringTrimSpaceCutset)
+		for _, consumeableType := range r.Consumes {
+			if consumeableType == "*/*" || consumeableType == mimeType {
+				return true
+			}
+		}
+		if len(remaining) == 0 {
+			return false
+		}
+	}
+}
+
+// Tokenize a URL path using the slash separator; the result does not have empty tokens
+func tokenizePath(path string) []string {
+	if "/" == path {
+		return nil
+	}
+	return strings.Split(strings.Trim(path, "/"), "/")
+}
+
+// for debugging
+func (r Route) String() string {
+	return r.Method + " " + r.Path
+}
+
+// EnableContentEncoding (default=false) allows for GZIP or DEFLATE encoding of responses. Overrides the container.contentEncodingEnabled value.
+func (r Route) EnableContentEncoding(enabled bool) {
+	r.contentEncodingEnabled = &enabled
+}
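RouteSelectionConditionFunction lets a route opt out of matching based on arbitrary request properties. A hedged sketch; the `X-Internal` header is purely illustrative, and the `If`/`GET`/`WebService` wiring shown in the trailing comment comes from the route_builder.go hunk that follows and the wider library surface.

```go
package handlers

import (
	"net/http"

	restful "github.com/emicklei/go-restful"
)

// internalOnly is a RouteSelectionConditionFunction: the route is only
// considered when the request carries the (illustrative) X-Internal header.
func internalOnly(r *http.Request) bool {
	return r.Header.Get("X-Internal") == "true"
}

func debug(req *restful.Request, resp *restful.Response) {
	resp.WriteEntity(map[string]bool{"debug": true})
}

// Wiring sketch (assumed API):
//   ws.Route(ws.GET("/debug").If(internalOnly).To(debug))
```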
diff --git a/vendor/github.com/emicklei/go-restful/route_builder.go b/vendor/github.com/emicklei/go-restful/route_builder.go
new file mode 100644
index 00000000..0fccf61e
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/route_builder.go
@@ -0,0 +1,326 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+	"fmt"
+	"os"
+	"reflect"
+	"runtime"
+	"strings"
+	"sync/atomic"
+
+	"github.com/emicklei/go-restful/log"
+)
+
+// RouteBuilder is a helper to construct Routes.
+type RouteBuilder struct {
+	rootPath    string
+	currentPath string
+	produces    []string
+	consumes    []string
+	httpMethod  string        // required
+	function    RouteFunction // required
+	filters     []FilterFunction
+	conditions  []RouteSelectionConditionFunction
+
+	typeNameHandleFunc TypeNameHandleFunction // required
+
+	// documentation
+	doc                     string
+	notes                   string
+	operation               string
+	readSample, writeSample interface{}
+	parameters              []*Parameter
+	errorMap                map[int]ResponseError
+	defaultResponse         *ResponseError
+	metadata                map[string]interface{}
+	deprecated              bool
+	contentEncodingEnabled  *bool
+}
+
+// Do evaluates each argument with the RouteBuilder itself.
+// This allows you to follow DRY principles without breaking the fluent programming style.
+// Example:
+// 		ws.Route(ws.DELETE("/{name}").To(t.deletePerson).Do(Returns200, Returns500))
+//
+//		func Returns500(b *RouteBuilder) {
+//			b.Returns(500, "Internal Server Error", restful.ServiceError{})
+//		}
+func (b *RouteBuilder) Do(oneArgBlocks ...func(*RouteBuilder)) *RouteBuilder {
+	for _, each := range oneArgBlocks {
+		each(b)
+	}
+	return b
+}
+
+// To bind the route to a function.
+// If this route is matched with the incoming Http Request then call this function with the *Request,*Response pair. Required.
+func (b *RouteBuilder) To(function RouteFunction) *RouteBuilder {
+	b.function = function
+	return b
+}
+
+// Method specifies what HTTP method to match. Required.
+func (b *RouteBuilder) Method(method string) *RouteBuilder {
+	b.httpMethod = method
+	return b
+}
+
+// Produces specifies what MIME types can be produced ; the matched one will appear in the Content-Type Http header.
+func (b *RouteBuilder) Produces(mimeTypes ...string) *RouteBuilder {
+	b.produces = mimeTypes
+	return b
+}
+
+// Consumes specifies what MIME types can be consumed; the Content-Type HTTP header must match one of these
+func (b *RouteBuilder) Consumes(mimeTypes ...string) *RouteBuilder {
+	b.consumes = mimeTypes
+	return b
+}
+
+// Path specifies the relative (w.r.t WebService root path) URL path to match. Default is "/".
+func (b *RouteBuilder) Path(subPath string) *RouteBuilder {
+	b.currentPath = subPath
+	return b
+}
+
+// Doc tells what this route is all about. Optional.
+func (b *RouteBuilder) Doc(documentation string) *RouteBuilder {
+	b.doc = documentation
+	return b
+}
+
+// Notes is a verbose explanation of the operation behavior. Optional.
+func (b *RouteBuilder) Notes(notes string) *RouteBuilder {
+	b.notes = notes
+	return b
+}
+
+// Reads tells what resource type will be read from the request payload. Optional.
+// A parameter of type "body" is added, required is set to true, and the dataType is set to the qualified name of the sample's type.
+func (b *RouteBuilder) Reads(sample interface{}, optionalDescription ...string) *RouteBuilder {
+	fn := b.typeNameHandleFunc
+	if fn == nil {
+		fn = reflectTypeName
+	}
+	typeAsName := fn(sample)
+	description := ""
+	if len(optionalDescription) > 0 {
+		description = optionalDescription[0]
+	}
+	b.readSample = sample
+	bodyParameter := &Parameter{&ParameterData{Name: "body", Description: description}}
+	bodyParameter.beBody()
+	bodyParameter.Required(true)
+	bodyParameter.DataType(typeAsName)
+	b.Param(bodyParameter)
+	return b
+}
+
+// ParameterNamed returns a Parameter already known to the RouteBuilder. Returns nil if no such parameter exists.
+// Use this to modify or extend information for the Parameter (through its Data()).
+func (b RouteBuilder) ParameterNamed(name string) (p *Parameter) {
+	for _, each := range b.parameters {
+		if each.Data().Name == name {
+			return each
+		}
+	}
+	return p
+}
+
+// Writes tells what resource type will be written as the response payload. Optional.
+func (b *RouteBuilder) Writes(sample interface{}) *RouteBuilder {
+	b.writeSample = sample
+	return b
+}
+
+// Param allows you to document the parameters of the Route. It adds a new Parameter (does not check for duplicates).
+func (b *RouteBuilder) Param(parameter *Parameter) *RouteBuilder {
+	if b.parameters == nil {
+		b.parameters = []*Parameter{}
+	}
+	b.parameters = append(b.parameters, parameter)
+	return b
+}
+
+// Operation allows you to document what the actual method/function call is of the Route.
+// Unless called, the operation name is derived from the RouteFunction set using To(..).
+func (b *RouteBuilder) Operation(name string) *RouteBuilder {
+	b.operation = name
+	return b
+}
+
+// ReturnsError is deprecated, use Returns instead.
+func (b *RouteBuilder) ReturnsError(code int, message string, model interface{}) *RouteBuilder {
+	log.Print("ReturnsError is deprecated, use Returns instead.")
+	return b.Returns(code, message, model)
+}
+
+// Returns allows you to document what responses (errors or regular) can be expected.
+// The model parameter is optional ; either pass a struct instance or use nil if not applicable.
+func (b *RouteBuilder) Returns(code int, message string, model interface{}) *RouteBuilder {
+	err := ResponseError{
+		Code:      code,
+		Message:   message,
+		Model:     model,
+		IsDefault: false, // this field is deprecated, use default response instead.
+	}
+	// lazy init because there is no NewRouteBuilder (yet)
+	if b.errorMap == nil {
+		b.errorMap = map[int]ResponseError{}
+	}
+	b.errorMap[code] = err
+	return b
+}
+
+// DefaultReturns is a special Returns call that sets the default of the response.
+func (b *RouteBuilder) DefaultReturns(message string, model interface{}) *RouteBuilder {
+	b.defaultResponse = &ResponseError{
+		Message: message,
+		Model:   model,
+	}
+	return b
+}
+
+// Metadata adds or updates a key=value pair to the metadata map.
+func (b *RouteBuilder) Metadata(key string, value interface{}) *RouteBuilder {
+	if b.metadata == nil {
+		b.metadata = map[string]interface{}{}
+	}
+	b.metadata[key] = value
+	return b
+}
+
+// Deprecate sets the value of deprecated to true. Deprecated routes have a special UI treatment to warn against use.
+func (b *RouteBuilder) Deprecate() *RouteBuilder {
+	b.deprecated = true
+	return b
+}
+
+// ResponseError represents a response; not necessarily an error.
+type ResponseError struct {
+	Code      int
+	Message   string
+	Model     interface{}
+	IsDefault bool
+}
+
+func (b *RouteBuilder) servicePath(path string) *RouteBuilder {
+	b.rootPath = path
+	return b
+}
+
+// Filter appends a FilterFunction to the end of filters for this Route to build.
+func (b *RouteBuilder) Filter(filter FilterFunction) *RouteBuilder {
+	b.filters = append(b.filters, filter)
+	return b
+}
+
+// If sets a condition function that controls matching the Route based on custom logic.
+// The condition function is provided the HTTP request and should return true if the route
+// should be considered.
+//
+// Efficiency note: the condition function is called before checking the method, produces, and
+// consumes criteria, so that the correct HTTP status code can be returned.
+//
+// Lifecycle note: no filter functions have been called prior to calling the condition function,
+// so the condition function should not depend on any context that might be set up by container
+// or route filters.
+func (b *RouteBuilder) If(condition RouteSelectionConditionFunction) *RouteBuilder {
+	b.conditions = append(b.conditions, condition)
+	return b
+}
+
+// ContentEncodingEnabled allows you to override the Container's value for auto-compressing this route response.
+func (b *RouteBuilder) ContentEncodingEnabled(enabled bool) *RouteBuilder {
+	b.contentEncodingEnabled = &enabled
+	return b
+}
+
+// If no specific Produces then set to rootProduces
+// If no specific Consumes then set to rootConsumes
+func (b *RouteBuilder) copyDefaults(rootProduces, rootConsumes []string) {
+	if len(b.produces) == 0 {
+		b.produces = rootProduces
+	}
+	if len(b.consumes) == 0 {
+		b.consumes = rootConsumes
+	}
+}
+
+// typeNameHandler sets the function that will convert types to strings in the parameter
+// and model definitions.
+func (b *RouteBuilder) typeNameHandler(handler TypeNameHandleFunction) *RouteBuilder {
+	b.typeNameHandleFunc = handler
+	return b
+}
+
+// Build creates a new Route using the specification details collected by the RouteBuilder
+func (b *RouteBuilder) Build() Route {
+	pathExpr, err := newPathExpression(b.currentPath)
+	if err != nil {
+		log.Printf("Invalid path:%s because:%v", b.currentPath, err)
+		os.Exit(1)
+	}
+	if b.function == nil {
+		log.Printf("No function specified for route:" + b.currentPath)
+		os.Exit(1)
+	}
+	operationName := b.operation
+	if len(operationName) == 0 && b.function != nil {
+		// extract from definition
+		operationName = nameOfFunction(b.function)
+	}
+	route := Route{
+		Method:                 b.httpMethod,
+		Path:                   concatPath(b.rootPath, b.currentPath),
+		Produces:               b.produces,
+		Consumes:               b.consumes,
+		Function:               b.function,
+		Filters:                b.filters,
+		If:                     b.conditions,
+		relativePath:           b.currentPath,
+		pathExpr:               pathExpr,
+		Doc:                    b.doc,
+		Notes:                  b.notes,
+		Operation:              operationName,
+		ParameterDocs:          b.parameters,
+		ResponseErrors:         b.errorMap,
+		DefaultResponse:        b.defaultResponse,
+		ReadSample:             b.readSample,
+		WriteSample:            b.writeSample,
+		Metadata:               b.metadata,
+		Deprecated:             b.deprecated,
+		contentEncodingEnabled: b.contentEncodingEnabled,
+	}
+	route.postBuild()
+	return route
+}
+
+func concatPath(path1, path2 string) string {
+	return strings.TrimRight(path1, "/") + "/" + strings.TrimLeft(path2, "/")
+}
+
+var anonymousFuncCount int32
+
+// nameOfFunction returns the short name of the function f for documentation.
+// It uses a runtime feature for debugging ; its value may change for later Go versions.
+func nameOfFunction(f interface{}) string {
+	fun := runtime.FuncForPC(reflect.ValueOf(f).Pointer())
+	tokenized := strings.Split(fun.Name(), ".")
+	last := tokenized[len(tokenized)-1]
+	last = strings.TrimSuffix(last, ")·fm") // < Go 1.5
+	last = strings.TrimSuffix(last, ")-fm") // Go 1.5
+	last = strings.TrimSuffix(last, "·fm")  // < Go 1.5
+	last = strings.TrimSuffix(last, "-fm")  // Go 1.5
+	if last == "func1" {                    // this could mean conflicts in API docs
+		val := atomic.AddInt32(&anonymousFuncCount, 1)
+		last = "func" + fmt.Sprintf("%d", val)
+		atomic.StoreInt32(&anonymousFuncCount, val)
+	}
+	return last
+}
diff --git a/vendor/github.com/emicklei/go-restful/router.go b/vendor/github.com/emicklei/go-restful/router.go
new file mode 100644
index 00000000..19078af1
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/router.go
@@ -0,0 +1,20 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import "net/http"
+
+// A RouteSelector finds the best matching Route given the input HTTP Request.
+// RouteSelectors can optionally also implement the PathProcessor interface to calculate the
+// path parameters after the route has been selected.
+type RouteSelector interface {
+
+	// SelectRoute finds a Route given the input HTTP Request and a list of WebServices.
+	// It returns a selected Route and its containing WebService or an error indicating
+	// a problem.
+	SelectRoute(
+		webServices []*WebService,
+		httpRequest *http.Request) (selectedService *WebService, selected *Route, err error)
+}
diff --git a/vendor/github.com/emicklei/go-restful/service_error.go b/vendor/github.com/emicklei/go-restful/service_error.go
new file mode 100644
index 00000000..62d1108b
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/service_error.go
@@ -0,0 +1,23 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import "fmt"
+
+// ServiceError is a transport object to pass information about a non-Http error that occurred in a WebService while processing a request.
+type ServiceError struct {
+	Code    int
+	Message string
+}
+
+// NewError returns a ServiceError using the code and reason
+func NewError(code int, message string) ServiceError {
+	return ServiceError{Code: code, Message: message}
+}
+
+// Error returns a text representation of the service error
+func (s ServiceError) Error() string {
+	return fmt.Sprintf("[ServiceError:%v] %v", s.Code, s.Message)
+}
diff --git a/vendor/github.com/emicklei/go-restful/web_service.go b/vendor/github.com/emicklei/go-restful/web_service.go
new file mode 100644
index 00000000..77ba9a8c
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/web_service.go
@@ -0,0 +1,290 @@
+package restful
+
+import (
+	"errors"
+	"os"
+	"reflect"
+	"sync"
+
+	"github.com/emicklei/go-restful/log"
+)
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+// WebService holds a collection of Route values that bind a Http Method + URL Path to a function.
+type WebService struct {
+	rootPath       string
+	pathExpr       *pathExpression // cached compilation of rootPath as RegExp
+	routes         []Route
+	produces       []string
+	consumes       []string
+	pathParameters []*Parameter
+	filters        []FilterFunction
+	documentation  string
+	apiVersion     string
+
+	typeNameHandleFunc TypeNameHandleFunction
+
+	dynamicRoutes bool
+
+	// protects 'routes' if dynamic routes are enabled
+	routesLock sync.RWMutex
+}
+
+func (w *WebService) SetDynamicRoutes(enable bool) {
+	w.dynamicRoutes = enable
+}
+
+// TypeNameHandleFunction declares functions that can handle translating the name of a sample object
+// into the restful documentation for the service.
+type TypeNameHandleFunction func(sample interface{}) string
+
+// TypeNameHandler sets the function that will convert types to strings in the parameter
+// and model definitions. If not set, the web service will invoke
+// reflect.TypeOf(object).String().
+func (w *WebService) TypeNameHandler(handler TypeNameHandleFunction) *WebService {
+	w.typeNameHandleFunc = handler
+	return w
+}
+
+// reflectTypeName is the default TypeNameHandleFunction and for a given object
+// returns the name that Go identifies it with (e.g. "string" or "v1.Object") via
+// the reflection API.
+func reflectTypeName(sample interface{}) string {
+	return reflect.TypeOf(sample).String()
+}
+
+// compilePathExpression ensures that the path is compiled into a RegEx for those routers that need it.
+func (w *WebService) compilePathExpression() {
+	compiled, err := newPathExpression(w.rootPath)
+	if err != nil {
+		log.Printf("invalid path:%s because:%v", w.rootPath, err)
+		os.Exit(1)
+	}
+	w.pathExpr = compiled
+}
+
+// ApiVersion sets the API version for documentation purposes.
+func (w *WebService) ApiVersion(apiVersion string) *WebService {
+	w.apiVersion = apiVersion
+	return w
+}
+
+// Version returns the API version for documentation purposes.
+func (w *WebService) Version() string { return w.apiVersion }
+
+// Path specifies the root URL template path of the WebService.
+// All Routes will be relative to this path.
+func (w *WebService) Path(root string) *WebService {
+	w.rootPath = root
+	if len(w.rootPath) == 0 {
+		w.rootPath = "/"
+	}
+	w.compilePathExpression()
+	return w
+}
+
+// Param adds a PathParameter to document parameters used in the root path.
+func (w *WebService) Param(parameter *Parameter) *WebService {
+	if w.pathParameters == nil {
+		w.pathParameters = []*Parameter{}
+	}
+	w.pathParameters = append(w.pathParameters, parameter)
+	return w
+}
+
+// PathParameter creates a new Parameter of kind Path for documentation purposes.
+// It is initialized as required with string as its DataType.
+func (w *WebService) PathParameter(name, description string) *Parameter {
+	return PathParameter(name, description)
+}
+
+// PathParameter creates a new Parameter of kind Path for documentation purposes.
+// It is initialized as required with string as its DataType.
+func PathParameter(name, description string) *Parameter {
+	p := &Parameter{&ParameterData{Name: name, Description: description, Required: true, DataType: "string"}}
+	p.bePath()
+	return p
+}
+
+// QueryParameter creates a new Parameter of kind Query for documentation purposes.
+// It is initialized as not required with string as its DataType.
+func (w *WebService) QueryParameter(name, description string) *Parameter {
+	return QueryParameter(name, description)
+}
+
+// QueryParameter creates a new Parameter of kind Query for documentation purposes.
+// It is initialized as not required with string as its DataType.
+func QueryParameter(name, description string) *Parameter {
+	p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string", CollectionFormat: CollectionFormatCSV.String()}}
+	p.beQuery()
+	return p
+}
+
+// BodyParameter creates a new Parameter of kind Body for documentation purposes.
+// It is initialized as required without a DataType.
+func (w *WebService) BodyParameter(name, description string) *Parameter {
+	return BodyParameter(name, description)
+}
+
+// BodyParameter creates a new Parameter of kind Body for documentation purposes.
+// It is initialized as required without a DataType.
+func BodyParameter(name, description string) *Parameter {
+	p := &Parameter{&ParameterData{Name: name, Description: description, Required: true}}
+	p.beBody()
+	return p
+}
+
+// HeaderParameter creates a new Parameter of kind (Http) Header for documentation purposes.
+// It is initialized as not required with string as its DataType.
+func (w *WebService) HeaderParameter(name, description string) *Parameter {
+	return HeaderParameter(name, description)
+}
+
+// HeaderParameter creates a new Parameter of kind (Http) Header for documentation purposes.
+// It is initialized as not required with string as its DataType.
+func HeaderParameter(name, description string) *Parameter {
+	p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}}
+	p.beHeader()
+	return p
+}
+
+// FormParameter creates a new Parameter of kind Form (using application/x-www-form-urlencoded) for documentation purposes.
+// It is initialized as required with string as its DataType.
+func (w *WebService) FormParameter(name, description string) *Parameter {
+	return FormParameter(name, description)
+}
+
+// FormParameter creates a new Parameter of kind Form (using application/x-www-form-urlencoded) for documentation purposes.
+// It is initialized as required with string as its DataType.
+func FormParameter(name, description string) *Parameter {
+	p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}}
+	p.beForm()
+	return p
+}
+
+// Route creates a new Route using the RouteBuilder and adds it to the ordered list of Routes.
+func (w *WebService) Route(builder *RouteBuilder) *WebService {
+	w.routesLock.Lock()
+	defer w.routesLock.Unlock()
+	builder.copyDefaults(w.produces, w.consumes)
+	w.routes = append(w.routes, builder.Build())
+	return w
+}
+
+// RemoveRoute removes the route that matches the given 'path' and 'method'.
+func (w *WebService) RemoveRoute(path, method string) error {
+	if !w.dynamicRoutes {
+		return errors.New("dynamic routes are not enabled.")
+	}
+	w.routesLock.Lock()
+	defer w.routesLock.Unlock()
+	newRoutes := make([]Route, (len(w.routes) - 1))
+	current := 0
+	for ix := range w.routes {
+		if w.routes[ix].Method == method && w.routes[ix].Path == path {
+			continue
+		}
+		newRoutes[current] = w.routes[ix]
+		current = current + 1
+	}
+	w.routes = newRoutes
+	return nil
+}
+
+// Method creates a new RouteBuilder and initializes its http method
+func (w *WebService) Method(httpMethod string) *RouteBuilder {
+	return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method(httpMethod)
+}
+
+// Produces specifies that this WebService can produce one or more MIME types.
+// Http requests must have one of these values set for the Accept header.
+func (w *WebService) Produces(contentTypes ...string) *WebService {
+	w.produces = contentTypes
+	return w
+}
+
+// Consumes specifies that this WebService can consume one or more MIME types.
+// Http requests must have one of these values set for the Content-Type header.
+func (w *WebService) Consumes(accepts ...string) *WebService {
+	w.consumes = accepts
+	return w
+}
+
+// Routes returns the Routes associated with this WebService
+func (w *WebService) Routes() []Route {
+	if !w.dynamicRoutes {
+		return w.routes
+	}
+	// Make a copy of the array to prevent concurrency problems
+	w.routesLock.RLock()
+	defer w.routesLock.RUnlock()
+	result := make([]Route, len(w.routes))
+	for ix := range w.routes {
+		result[ix] = w.routes[ix]
+	}
+	return result
+}
+
+// RootPath returns the RootPath associated with this WebService. Default "/"
+func (w *WebService) RootPath() string {
+	return w.rootPath
+}
+
+// PathParameters returns the path parameters shared among all Routes of this WebService
+func (w *WebService) PathParameters() []*Parameter {
+	return w.pathParameters
+}
+
+// Filter adds a filter function to the chain of filters applicable to all its Routes
+func (w *WebService) Filter(filter FilterFunction) *WebService {
+	w.filters = append(w.filters, filter)
+	return w
+}
+
+// Doc is used to set the documentation of this service.
+func (w *WebService) Doc(plainText string) *WebService {
+	w.documentation = plainText
+	return w
+}
+
+// Documentation returns the documentation of this WebService.
+func (w *WebService) Documentation() string {
+	return w.documentation
+}
+
+/*
+	Convenience methods
+*/
+
+// HEAD is a shortcut for .Method("HEAD").Path(subPath)
+func (w *WebService) HEAD(subPath string) *RouteBuilder {
+	return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("HEAD").Path(subPath)
+}
+
+// GET is a shortcut for .Method("GET").Path(subPath)
+func (w *WebService) GET(subPath string) *RouteBuilder {
+	return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("GET").Path(subPath)
+}
+
+// POST is a shortcut for .Method("POST").Path(subPath)
+func (w *WebService) POST(subPath string) *RouteBuilder {
+	return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("POST").Path(subPath)
+}
+
+// PUT is a shortcut for .Method("PUT").Path(subPath)
+func (w *WebService) PUT(subPath string) *RouteBuilder {
+	return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("PUT").Path(subPath)
+}
+
+// PATCH is a shortcut for .Method("PATCH").Path(subPath)
+func (w *WebService) PATCH(subPath string) *RouteBuilder {
+	return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("PATCH").Path(subPath)
+}
+
+// DELETE is a shortcut for .Method("DELETE").Path(subPath)
+func (w *WebService) DELETE(subPath string) *RouteBuilder {
+	return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("DELETE").Path(subPath)
+}
diff --git a/vendor/github.com/emicklei/go-restful/web_service_container.go b/vendor/github.com/emicklei/go-restful/web_service_container.go
new file mode 100644
index 00000000..c9d31b06
--- /dev/null
+++ b/vendor/github.com/emicklei/go-restful/web_service_container.go
@@ -0,0 +1,39 @@
+package restful
+
+// Copyright 2013 Ernest Micklei. All rights reserved.
+// Use of this source code is governed by a license
+// that can be found in the LICENSE file.
+
+import (
+	"net/http"
+)
+
+// DefaultContainer is a restful.Container that uses http.DefaultServeMux
+var DefaultContainer *Container
+
+func init() {
+	DefaultContainer = NewContainer()
+	DefaultContainer.ServeMux = http.DefaultServeMux
+}
+
+// If set to true then panics will not be caught to return HTTP 500.
+// In that case, Route functions are responsible for handling any error situation.
+// Default value is false = recover from panics. This has performance implications.
+// OBSOLETE ; use restful.DefaultContainer.DoNotRecover(true)
+var DoNotRecover = false
+
+// Add registers a new WebService and adds it to the DefaultContainer.
+func Add(service *WebService) {
+	DefaultContainer.Add(service)
+}
+
+// Filter appends a container FilterFunction to the DefaultContainer.
+// These are called before dispatching a http.Request to a WebService.
+func Filter(filter FilterFunction) {
+	DefaultContainer.Filter(filter)
+}
+
+// RegisteredWebServices returns the collection of WebServices from the DefaultContainer
+func RegisteredWebServices() []*WebService {
+	return DefaultContainer.RegisteredWebServices()
+}
diff --git a/vendor/github.com/evanphx/json-patch/.travis.yml b/vendor/github.com/evanphx/json-patch/.travis.yml
new file mode 100644
index 00000000..2092c72c
--- /dev/null
+++ b/vendor/github.com/evanphx/json-patch/.travis.yml
@@ -0,0 +1,16 @@
+language: go
+
+go:
+  - 1.8
+  - 1.7
+
+install:
+  - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
+  - go get github.com/jessevdk/go-flags
+
+script:
+  - go get
+  - go test -cover ./...
+
+notifications:
+  email: false
diff --git a/vendor/github.com/evanphx/json-patch/LICENSE b/vendor/github.com/evanphx/json-patch/LICENSE
new file mode 100644
index 00000000..0eb9b72d
--- /dev/null
+++ b/vendor/github.com/evanphx/json-patch/LICENSE
@@ -0,0 +1,25 @@
+Copyright (c) 2014, Evan Phoenix
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without 
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright notice
+  this list of conditions and the following disclaimer in the documentation
+  and/or other materials provided with the distribution.
+* Neither the name of the Evan Phoenix nor the names of its contributors 
+  may be used to endorse or promote products derived from this software 
+  without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE 
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/evanphx/json-patch/README.md b/vendor/github.com/evanphx/json-patch/README.md
new file mode 100644
index 00000000..9c7f87f7
--- /dev/null
+++ b/vendor/github.com/evanphx/json-patch/README.md
@@ -0,0 +1,297 @@
+# JSON-Patch
+`jsonpatch` is a library which provides functionality for both applying
+[RFC6902 JSON patches](http://tools.ietf.org/html/rfc6902) against documents, as
+well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ietf.org/html/rfc7396).
+
+[![GoDoc](https://godoc.org/github.com/evanphx/json-patch?status.svg)](http://godoc.org/github.com/evanphx/json-patch)
+[![Build Status](https://travis-ci.org/evanphx/json-patch.svg?branch=master)](https://travis-ci.org/evanphx/json-patch)
+[![Report Card](https://goreportcard.com/badge/github.com/evanphx/json-patch)](https://goreportcard.com/report/github.com/evanphx/json-patch)
+
+# Get It!
+
+**Latest and greatest**: 
+```bash
+go get -u github.com/evanphx/json-patch
+```
+
+**Stable Versions**:
+* Version 4: `go get -u gopkg.in/evanphx/json-patch.v4`
+
+(previous versions below `v3` are unavailable)
+
+# Use It!
+* [Create and apply a merge patch](#create-and-apply-a-merge-patch)
+* [Create and apply a JSON Patch](#create-and-apply-a-json-patch)
+* [Comparing JSON documents](#comparing-json-documents)
+* [Combine merge patches](#combine-merge-patches)
+
+
+# Configuration
+
+* There is a global configuration variable `jsonpatch.SupportNegativeIndices`.
+  This defaults to `true` and enables the non-standard practice of allowing
+  negative indices to mean indices starting at the end of an array. This
+  functionality can be disabled by setting `jsonpatch.SupportNegativeIndices =
+  false`.
+
+* There is a global configuration variable `jsonpatch.AccumulatedCopySizeLimit`,
+  which limits the total size increase in bytes caused by "copy" operations in a
+  patch. It defaults to 0, which means there is no limit.
+
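+Both settings are package-level globals and apply process-wide. As a minimal sketch
+(the document and patch literals below are invented for illustration), they might be
+set before applying a patch like so:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	jsonpatch "github.com/evanphx/json-patch"
+)
+
+func main() {
+	// Disable the non-standard negative-index support and cap the total size
+	// increase caused by "copy" operations at 1 MiB.
+	jsonpatch.SupportNegativeIndices = false
+	jsonpatch.AccumulatedCopySizeLimit = 1024 * 1024
+
+	// Example document and patch, invented for illustration.
+	patch, err := jsonpatch.DecodePatch([]byte(`[{"op": "copy", "from": "/a", "path": "/b"}]`))
+	if err != nil {
+		panic(err)
+	}
+
+	modified, err := patch.Apply([]byte(`{"a": "hello"}`))
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("modified document: %s\n", modified)
+}
+```
+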
+## Create and apply a merge patch
+Given both an original JSON document and a modified JSON document, you can create
+a [Merge Patch](https://tools.ietf.org/html/rfc7396) document. 
+
+It can describe the changes needed to convert from the original to the 
+modified JSON document.
+
+Once you have a merge patch, you can apply it to other JSON documents using the
+`jsonpatch.MergePatch(document, patch)` function.
+
+```go
+package main
+
+import (
+	"fmt"
+
+	jsonpatch "github.com/evanphx/json-patch"
+)
+
+func main() {
+	// Let's create a merge patch from these two documents...
+	original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
+	target := []byte(`{"name": "Jane", "age": 24}`)
+
+	patch, err := jsonpatch.CreateMergePatch(original, target)
+	if err != nil {
+		panic(err)
+	}
+
+	// Now lets apply the patch against a different JSON document...
+
+	alternative := []byte(`{"name": "Tina", "age": 28, "height": 3.75}`)
+	modifiedAlternative, err := jsonpatch.MergePatch(alternative, patch)
+
+	fmt.Printf("patch document:   %s\n", patch)
+	fmt.Printf("updated alternative doc: %s\n", modifiedAlternative)
+}
+```
+
+When run, you get the following output:
+
+```bash
+$ go run main.go
+patch document:   {"height":null,"name":"Jane"}
+updated alternative doc: {"age":28,"name":"Jane"}
+```
+
+## Create and apply a JSON Patch
+You can create patch objects using `DecodePatch([]byte)`, which can then 
+be applied against JSON documents.
+
+The following is an example of creating a patch from two operations, and
+applying it against a JSON document.
+
+```go
+package main
+
+import (
+	"fmt"
+
+	jsonpatch "github.com/evanphx/json-patch"
+)
+
+func main() {
+	original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
+	patchJSON := []byte(`[
+		{"op": "replace", "path": "/name", "value": "Jane"},
+		{"op": "remove", "path": "/height"}
+	]`)
+
+	patch, err := jsonpatch.DecodePatch(patchJSON)
+	if err != nil {
+		panic(err)
+	}
+
+	modified, err := patch.Apply(original)
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("Original document: %s\n", original)
+	fmt.Printf("Modified document: %s\n", modified)
+}
+```
+
+When run, you get the following output:
+
+```bash
+$ go run main.go
+Original document: {"name": "John", "age": 24, "height": 3.21}
+Modified document: {"age":24,"name":"Jane"}
+```
+
+## Comparing JSON documents
+Due to potential whitespace and ordering differences, one cannot simply compare
+JSON strings or byte-arrays directly. 
+
+As such, you can instead use `jsonpatch.Equal(document1, document2)` to 
+determine if two JSON documents are _structurally_ equal. This ignores
+whitespace differences, and key-value ordering.
+
+```go
+package main
+
+import (
+	"fmt"
+
+	jsonpatch "github.com/evanphx/json-patch"
+)
+
+func main() {
+	original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
+	similar := []byte(`
+		{
+			"age": 24,
+			"height": 3.21,
+			"name": "John"
+		}
+	`)
+	different := []byte(`{"name": "Jane", "age": 20, "height": 3.37}`)
+
+	if jsonpatch.Equal(original, similar) {
+		fmt.Println(`"original" is structurally equal to "similar"`)
+	}
+
+	if !jsonpatch.Equal(original, different) {
+		fmt.Println(`"original" is _not_ structurally equal to "similar"`)
+	}
+}
+```
+
+When run, you get the following output:
+```bash
+$ go run main.go
+"original" is structurally equal to "similar"
+"original" is _not_ structurally equal to "similar"
+```
+
+## Combine merge patches
+Given two JSON merge patch documents, it is possible to combine them into a 
+single merge patch which can describe both sets of changes.
+
+The resulting merge patch can be used such that applying it results in a
+document structurally similar to merging each merge patch into the document
+in succession. 
+
+```go
+package main
+
+import (
+	"fmt"
+
+	jsonpatch "github.com/evanphx/json-patch"
+)
+
+func main() {
+	original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
+
+	nameAndHeight := []byte(`{"height":null,"name":"Jane"}`)
+	ageAndEyes := []byte(`{"age":4.23,"eyes":"blue"}`)
+
+	// Let's combine these merge patch documents...
+	combinedPatch, err := jsonpatch.MergeMergePatches(nameAndHeight, ageAndEyes)
+	if err != nil {
+		panic(err)
+	}
+
+	// Apply each patch individually against the original document
+	withoutCombinedPatch, err := jsonpatch.MergePatch(original, nameAndHeight)
+	if err != nil {
+		panic(err)
+	}
+
+	withoutCombinedPatch, err = jsonpatch.MergePatch(withoutCombinedPatch, ageAndEyes)
+	if err != nil {
+		panic(err)
+	}
+
+	// Apply the combined patch against the original document
+
+	withCombinedPatch, err := jsonpatch.MergePatch(original, combinedPatch)
+	if err != nil {
+		panic(err)
+	}
+
+	// Do both result in the same thing? They should!
+	if jsonpatch.Equal(withCombinedPatch, withoutCombinedPatch) {
+		fmt.Println("Both JSON documents are structurally the same!")
+	}
+
+	fmt.Printf("combined merge patch: %s", combinedPatch)
+}
+```
+
+When run, you get the following output:
+```bash
+$ go run main.go
+Both JSON documents are structurally the same!
+combined merge patch: {"age":4.23,"eyes":"blue","height":null,"name":"Jane"}
+```
+
+# CLI for comparing JSON documents
+You can install the commandline program `json-patch`.
+
+This program can take multiple JSON patch documents as arguments, 
+and be fed a JSON document from `stdin`. It will apply the patch(es) against 
+the document and output the modified doc.
+
+**patch.1.json**
+```json
+[
+    {"op": "replace", "path": "/name", "value": "Jane"},
+    {"op": "remove", "path": "/height"}
+]
+```
+
+**patch.2.json**
+```json
+[
+    {"op": "add", "path": "/address", "value": "123 Main St"},
+    {"op": "replace", "path": "/age", "value": "21"}
+]
+```
+
+**document.json**
+```json
+{
+    "name": "John",
+    "age": 24,
+    "height": 3.21
+}
+```
+
+You can then run:
+
+```bash
+$ go install github.com/evanphx/json-patch/cmd/json-patch
+$ cat document.json | json-patch -p patch.1.json -p patch.2.json
+{"address":"123 Main St","age":"21","name":"Jane"}
+```
+
+# Help It!
+Contributions are welcomed! Leave [an issue](https://github.com/evanphx/json-patch/issues)
+or [create a PR](https://github.com/evanphx/json-patch/compare).
+
+
+Before creating a pull request, we'd ask that you make sure tests are passing
+and that you have added new tests when applicable.
+
+Contributors can run tests using:
+
+```bash
+go test -cover ./...
+```
+
+Builds for pull requests are tested automatically 
+using [TravisCI](https://travis-ci.org/evanphx/json-patch).
diff --git a/vendor/github.com/evanphx/json-patch/errors.go b/vendor/github.com/evanphx/json-patch/errors.go
new file mode 100644
index 00000000..75304b44
--- /dev/null
+++ b/vendor/github.com/evanphx/json-patch/errors.go
@@ -0,0 +1,38 @@
+package jsonpatch
+
+import "fmt"
+
+// AccumulatedCopySizeError is an error type returned when the accumulated size
+// increase caused by copy operations in a patch operation has exceeded the
+// limit.
+type AccumulatedCopySizeError struct {
+	limit       int64
+	accumulated int64
+}
+
+// NewAccumulatedCopySizeError returns an AccumulatedCopySizeError.
+func NewAccumulatedCopySizeError(l, a int64) *AccumulatedCopySizeError {
+	return &AccumulatedCopySizeError{limit: l, accumulated: a}
+}
+
+// Error implements the error interface.
+func (a *AccumulatedCopySizeError) Error() string {
+	return fmt.Sprintf("Unable to complete the copy, the accumulated size increase of copy is %d, exceeding the limit %d", a.accumulated, a.limit)
+}
+
+// ArraySizeError is an error type returned when the array size has exceeded
+// the limit.
+type ArraySizeError struct {
+	limit int
+	size  int
+}
+
+// NewArraySizeError returns an ArraySizeError.
+func NewArraySizeError(l, s int) *ArraySizeError {
+	return &ArraySizeError{limit: l, size: s}
+}
+
+// Error implements the error interface.
+func (a *ArraySizeError) Error() string {
+	return fmt.Sprintf("Unable to create array of size %d, limit is %d", a.size, a.limit)
+}
diff --git a/vendor/github.com/evanphx/json-patch/merge.go b/vendor/github.com/evanphx/json-patch/merge.go
new file mode 100644
index 00000000..6806c4c2
--- /dev/null
+++ b/vendor/github.com/evanphx/json-patch/merge.go
@@ -0,0 +1,383 @@
+package jsonpatch
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"reflect"
+)
+
+func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode {
+	curDoc, err := cur.intoDoc()
+
+	if err != nil {
+		pruneNulls(patch)
+		return patch
+	}
+
+	patchDoc, err := patch.intoDoc()
+
+	if err != nil {
+		return patch
+	}
+
+	mergeDocs(curDoc, patchDoc, mergeMerge)
+
+	return cur
+}
+
+func mergeDocs(doc, patch *partialDoc, mergeMerge bool) {
+	for k, v := range *patch {
+		if v == nil {
+			if mergeMerge {
+				(*doc)[k] = nil
+			} else {
+				delete(*doc, k)
+			}
+		} else {
+			cur, ok := (*doc)[k]
+
+			if !ok || cur == nil {
+				pruneNulls(v)
+				(*doc)[k] = v
+			} else {
+				(*doc)[k] = merge(cur, v, mergeMerge)
+			}
+		}
+	}
+}
+
+func pruneNulls(n *lazyNode) {
+	sub, err := n.intoDoc()
+
+	if err == nil {
+		pruneDocNulls(sub)
+	} else {
+		ary, err := n.intoAry()
+
+		if err == nil {
+			pruneAryNulls(ary)
+		}
+	}
+}
+
+func pruneDocNulls(doc *partialDoc) *partialDoc {
+	for k, v := range *doc {
+		if v == nil {
+			delete(*doc, k)
+		} else {
+			pruneNulls(v)
+		}
+	}
+
+	return doc
+}
+
+func pruneAryNulls(ary *partialArray) *partialArray {
+	newAry := []*lazyNode{}
+
+	for _, v := range *ary {
+		if v != nil {
+			pruneNulls(v)
+			newAry = append(newAry, v)
+		}
+	}
+
+	*ary = newAry
+
+	return ary
+}
+
+var errBadJSONDoc = fmt.Errorf("Invalid JSON Document")
+var errBadJSONPatch = fmt.Errorf("Invalid JSON Patch")
+var errBadMergeTypes = fmt.Errorf("Mismatched JSON Documents")
+
+// MergeMergePatches merges two merge patches together, such that
+// applying this resulting merged merge patch to a document yields the same
+// as merging each merge patch to the document in succession.
+func MergeMergePatches(patch1Data, patch2Data []byte) ([]byte, error) {
+	return doMergePatch(patch1Data, patch2Data, true)
+}
+
+// MergePatch merges the patchData into the docData.
+func MergePatch(docData, patchData []byte) ([]byte, error) {
+	return doMergePatch(docData, patchData, false)
+}
+
+func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
+	doc := &partialDoc{}
+
+	docErr := json.Unmarshal(docData, doc)
+
+	patch := &partialDoc{}
+
+	patchErr := json.Unmarshal(patchData, patch)
+
+	if _, ok := docErr.(*json.SyntaxError); ok {
+		return nil, errBadJSONDoc
+	}
+
+	if _, ok := patchErr.(*json.SyntaxError); ok {
+		return nil, errBadJSONPatch
+	}
+
+	if docErr == nil && *doc == nil {
+		return nil, errBadJSONDoc
+	}
+
+	if patchErr == nil && *patch == nil {
+		return nil, errBadJSONPatch
+	}
+
+	if docErr != nil || patchErr != nil {
+		// Not an error, just not a doc, so we turn straight into the patch
+		if patchErr == nil {
+			if mergeMerge {
+				doc = patch
+			} else {
+				doc = pruneDocNulls(patch)
+			}
+		} else {
+			patchAry := &partialArray{}
+			patchErr = json.Unmarshal(patchData, patchAry)
+
+			if patchErr != nil {
+				return nil, errBadJSONPatch
+			}
+
+			pruneAryNulls(patchAry)
+
+			out, patchErr := json.Marshal(patchAry)
+
+			if patchErr != nil {
+				return nil, errBadJSONPatch
+			}
+
+			return out, nil
+		}
+	} else {
+		mergeDocs(doc, patch, mergeMerge)
+	}
+
+	return json.Marshal(doc)
+}
+
+// resemblesJSONArray indicates whether the byte-slice "appears" to be
+// a JSON array or not.
+// False-positives are possible, as this function does not check the internal
+// structure of the array. It only checks that the outer syntax is present and
+// correct.
+func resemblesJSONArray(input []byte) bool {
+	input = bytes.TrimSpace(input)
+
+	hasPrefix := bytes.HasPrefix(input, []byte("["))
+	hasSuffix := bytes.HasSuffix(input, []byte("]"))
+
+	return hasPrefix && hasSuffix
+}
+
+// CreateMergePatch will return a merge patch document capable of converting
+// the original document(s) to the modified document(s).
+// The parameters can be bytes of either two JSON Documents, or two arrays of
+// JSON documents.
+// The merge patch returned follows the specification defined at http://tools.ietf.org/html/draft-ietf-appsawg-json-merge-patch-07
+func CreateMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
+	originalResemblesArray := resemblesJSONArray(originalJSON)
+	modifiedResemblesArray := resemblesJSONArray(modifiedJSON)
+
+	// Do both byte-slices seem like JSON arrays?
+	if originalResemblesArray && modifiedResemblesArray {
+		return createArrayMergePatch(originalJSON, modifiedJSON)
+	}
+
+	// Are both byte-slices not arrays? Then they are likely JSON objects...
+	if !originalResemblesArray && !modifiedResemblesArray {
+		return createObjectMergePatch(originalJSON, modifiedJSON)
+	}
+
+	// None of the above? Then return an error because of mismatched types.
+	return nil, errBadMergeTypes
+}
+
+// createObjectMergePatch will return a merge-patch document capable of
+// converting the original document to the modified document.
+func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
+	originalDoc := map[string]interface{}{}
+	modifiedDoc := map[string]interface{}{}
+
+	err := json.Unmarshal(originalJSON, &originalDoc)
+	if err != nil {
+		return nil, errBadJSONDoc
+	}
+
+	err = json.Unmarshal(modifiedJSON, &modifiedDoc)
+	if err != nil {
+		return nil, errBadJSONDoc
+	}
+
+	dest, err := getDiff(originalDoc, modifiedDoc)
+	if err != nil {
+		return nil, err
+	}
+
+	return json.Marshal(dest)
+}
+
+// createArrayMergePatch will return an array of merge-patch documents capable
+// of converting the original document to the modified document for each
+// pair of JSON documents provided in the arrays.
+// Arrays of mismatched sizes will result in an error.
+func createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
+	originalDocs := []json.RawMessage{}
+	modifiedDocs := []json.RawMessage{}
+
+	err := json.Unmarshal(originalJSON, &originalDocs)
+	if err != nil {
+		return nil, errBadJSONDoc
+	}
+
+	err = json.Unmarshal(modifiedJSON, &modifiedDocs)
+	if err != nil {
+		return nil, errBadJSONDoc
+	}
+
+	total := len(originalDocs)
+	if len(modifiedDocs) != total {
+		return nil, errBadJSONDoc
+	}
+
+	result := []json.RawMessage{}
+	for i := 0; i < len(originalDocs); i++ {
+		original := originalDocs[i]
+		modified := modifiedDocs[i]
+
+		patch, err := createObjectMergePatch(original, modified)
+		if err != nil {
+			return nil, err
+		}
+
+		result = append(result, json.RawMessage(patch))
+	}
+
+	return json.Marshal(result)
+}
+
+// Returns true if the arrays match (must be json types).
+// As is idiomatic for go, an empty array is not the same as a nil array.
+func matchesArray(a, b []interface{}) bool {
+	if len(a) != len(b) {
+		return false
+	}
+	if (a == nil && b != nil) || (a != nil && b == nil) {
+		return false
+	}
+	for i := range a {
+		if !matchesValue(a[i], b[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+// Returns true if the values match (must be json types)
+// The types of the values must match, otherwise it will always return false
+// If two map[string]interface{} are given, all elements must match.
+func matchesValue(av, bv interface{}) bool {
+	if reflect.TypeOf(av) != reflect.TypeOf(bv) {
+		return false
+	}
+	switch at := av.(type) {
+	case string:
+		bt := bv.(string)
+		if bt == at {
+			return true
+		}
+	case float64:
+		bt := bv.(float64)
+		if bt == at {
+			return true
+		}
+	case bool:
+		bt := bv.(bool)
+		if bt == at {
+			return true
+		}
+	case nil:
+		// Both nil, fine.
+		return true
+	case map[string]interface{}:
+		bt := bv.(map[string]interface{})
+		for key := range at {
+			if !matchesValue(at[key], bt[key]) {
+				return false
+			}
+		}
+		for key := range bt {
+			if !matchesValue(at[key], bt[key]) {
+				return false
+			}
+		}
+		return true
+	case []interface{}:
+		bt := bv.([]interface{})
+		return matchesArray(at, bt)
+	}
+	return false
+}
+
+// getDiff returns the (recursive) difference between a and b as a map[string]interface{}.
+func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) {
+	into := map[string]interface{}{}
+	for key, bv := range b {
+		av, ok := a[key]
+		// value was added
+		if !ok {
+			into[key] = bv
+			continue
+		}
+		// If types have changed, replace completely
+		if reflect.TypeOf(av) != reflect.TypeOf(bv) {
+			into[key] = bv
+			continue
+		}
+		// Types are the same, compare values
+		switch at := av.(type) {
+		case map[string]interface{}:
+			bt := bv.(map[string]interface{})
+			dst := make(map[string]interface{}, len(bt))
+			dst, err := getDiff(at, bt)
+			if err != nil {
+				return nil, err
+			}
+			if len(dst) > 0 {
+				into[key] = dst
+			}
+		case string, float64, bool:
+			if !matchesValue(av, bv) {
+				into[key] = bv
+			}
+		case []interface{}:
+			bt := bv.([]interface{})
+			if !matchesArray(at, bt) {
+				into[key] = bv
+			}
+		case nil:
+			switch bv.(type) {
+			case nil:
+				// Both nil, fine.
+			default:
+				into[key] = bv
+			}
+		default:
+			panic(fmt.Sprintf("Unknown type:%T in key %s", av, key))
+		}
+	}
+	// Now add all deleted values as nil
+	for key := range a {
+		_, found := b[key]
+		if !found {
+			into[key] = nil
+		}
+	}
+	return into, nil
+}
diff --git a/vendor/github.com/evanphx/json-patch/patch.go b/vendor/github.com/evanphx/json-patch/patch.go
new file mode 100644
index 00000000..1b5f95e6
--- /dev/null
+++ b/vendor/github.com/evanphx/json-patch/patch.go
@@ -0,0 +1,776 @@
+package jsonpatch
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/pkg/errors"
+)
+
+const (
+	eRaw = iota
+	eDoc
+	eAry
+)
+
+var (
+	// SupportNegativeIndices decides whether to support non-standard practice of
+	// allowing negative indices to mean indices starting at the end of an array.
+	// Default to true.
+	SupportNegativeIndices bool = true
+	// AccumulatedCopySizeLimit limits the total size increase in bytes caused by
+	// "copy" operations in a patch.
+	AccumulatedCopySizeLimit int64 = 0
+)
+
+var (
+	ErrTestFailed   = errors.New("test failed")
+	ErrMissing      = errors.New("missing value")
+	ErrUnknownType  = errors.New("unknown object type")
+	ErrInvalid      = errors.New("invalid state detected")
+	ErrInvalidIndex = errors.New("invalid index referenced")
+)
+
+type lazyNode struct {
+	raw   *json.RawMessage
+	doc   partialDoc
+	ary   partialArray
+	which int
+}
+
+// Operation is a single JSON-Patch step, such as a single 'add' operation.
+type Operation map[string]*json.RawMessage
+
+// Patch is an ordered collection of Operations.
+type Patch []Operation
+
+type partialDoc map[string]*lazyNode
+type partialArray []*lazyNode
+
+type container interface {
+	get(key string) (*lazyNode, error)
+	set(key string, val *lazyNode) error
+	add(key string, val *lazyNode) error
+	remove(key string) error
+}
+
+func newLazyNode(raw *json.RawMessage) *lazyNode {
+	return &lazyNode{raw: raw, doc: nil, ary: nil, which: eRaw}
+}
+
+func (n *lazyNode) MarshalJSON() ([]byte, error) {
+	switch n.which {
+	case eRaw:
+		return json.Marshal(n.raw)
+	case eDoc:
+		return json.Marshal(n.doc)
+	case eAry:
+		return json.Marshal(n.ary)
+	default:
+		return nil, ErrUnknownType
+	}
+}
+
+func (n *lazyNode) UnmarshalJSON(data []byte) error {
+	dest := make(json.RawMessage, len(data))
+	copy(dest, data)
+	n.raw = &dest
+	n.which = eRaw
+	return nil
+}
+
+func deepCopy(src *lazyNode) (*lazyNode, int, error) {
+	if src == nil {
+		return nil, 0, nil
+	}
+	a, err := src.MarshalJSON()
+	if err != nil {
+		return nil, 0, err
+	}
+	sz := len(a)
+	ra := make(json.RawMessage, sz)
+	copy(ra, a)
+	return newLazyNode(&ra), sz, nil
+}
+
+func (n *lazyNode) intoDoc() (*partialDoc, error) {
+	if n.which == eDoc {
+		return &n.doc, nil
+	}
+
+	if n.raw == nil {
+		return nil, ErrInvalid
+	}
+
+	err := json.Unmarshal(*n.raw, &n.doc)
+
+	if err != nil {
+		return nil, err
+	}
+
+	n.which = eDoc
+	return &n.doc, nil
+}
+
+func (n *lazyNode) intoAry() (*partialArray, error) {
+	if n.which == eAry {
+		return &n.ary, nil
+	}
+
+	if n.raw == nil {
+		return nil, ErrInvalid
+	}
+
+	err := json.Unmarshal(*n.raw, &n.ary)
+
+	if err != nil {
+		return nil, err
+	}
+
+	n.which = eAry
+	return &n.ary, nil
+}
+
+func (n *lazyNode) compact() []byte {
+	buf := &bytes.Buffer{}
+
+	if n.raw == nil {
+		return nil
+	}
+
+	err := json.Compact(buf, *n.raw)
+
+	if err != nil {
+		return *n.raw
+	}
+
+	return buf.Bytes()
+}
+
+func (n *lazyNode) tryDoc() bool {
+	if n.raw == nil {
+		return false
+	}
+
+	err := json.Unmarshal(*n.raw, &n.doc)
+
+	if err != nil {
+		return false
+	}
+
+	n.which = eDoc
+	return true
+}
+
+func (n *lazyNode) tryAry() bool {
+	if n.raw == nil {
+		return false
+	}
+
+	err := json.Unmarshal(*n.raw, &n.ary)
+
+	if err != nil {
+		return false
+	}
+
+	n.which = eAry
+	return true
+}
+
+func (n *lazyNode) equal(o *lazyNode) bool {
+	if n.which == eRaw {
+		if !n.tryDoc() && !n.tryAry() {
+			if o.which != eRaw {
+				return false
+			}
+
+			return bytes.Equal(n.compact(), o.compact())
+		}
+	}
+
+	if n.which == eDoc {
+		if o.which == eRaw {
+			if !o.tryDoc() {
+				return false
+			}
+		}
+
+		if o.which != eDoc {
+			return false
+		}
+
+		for k, v := range n.doc {
+			ov, ok := o.doc[k]
+
+			if !ok {
+				return false
+			}
+
+			if v == nil && ov == nil {
+				continue
+			}
+
+			if !v.equal(ov) {
+				return false
+			}
+		}
+
+		return true
+	}
+
+	if o.which != eAry && !o.tryAry() {
+		return false
+	}
+
+	if len(n.ary) != len(o.ary) {
+		return false
+	}
+
+	for idx, val := range n.ary {
+		if !val.equal(o.ary[idx]) {
+			return false
+		}
+	}
+
+	return true
+}
+
+// Kind reads the "op" field of the Operation.
+func (o Operation) Kind() string {
+	if obj, ok := o["op"]; ok && obj != nil {
+		var op string
+
+		err := json.Unmarshal(*obj, &op)
+
+		if err != nil {
+			return "unknown"
+		}
+
+		return op
+	}
+
+	return "unknown"
+}
+
+// Path reads the "path" field of the Operation.
+func (o Operation) Path() (string, error) {
+	if obj, ok := o["path"]; ok && obj != nil {
+		var op string
+
+		err := json.Unmarshal(*obj, &op)
+
+		if err != nil {
+			return "unknown", err
+		}
+
+		return op, nil
+	}
+
+	return "unknown", errors.Wrapf(ErrMissing, "operation missing path field")
+}
+
+// From reads the "from" field of the Operation.
+func (o Operation) From() (string, error) {
+	if obj, ok := o["from"]; ok && obj != nil {
+		var op string
+
+		err := json.Unmarshal(*obj, &op)
+
+		if err != nil {
+			return "unknown", err
+		}
+
+		return op, nil
+	}
+
+	return "unknown", errors.Wrapf(ErrMissing, "operation, missing from field")
+}
+
+func (o Operation) value() *lazyNode {
+	if obj, ok := o["value"]; ok {
+		return newLazyNode(obj)
+	}
+
+	return nil
+}
+
+// ValueInterface decodes the operation value into an interface.
+func (o Operation) ValueInterface() (interface{}, error) {
+	if obj, ok := o["value"]; ok && obj != nil {
+		var v interface{}
+
+		err := json.Unmarshal(*obj, &v)
+
+		if err != nil {
+			return nil, err
+		}
+
+		return v, nil
+	}
+
+	return nil, errors.Wrapf(ErrMissing, "operation, missing value field")
+}
+
+func isArray(buf []byte) bool {
+Loop:
+	for _, c := range buf {
+		switch c {
+		case ' ':
+		case '\n':
+		case '\t':
+			continue
+		case '[':
+			return true
+		default:
+			break Loop
+		}
+	}
+
+	return false
+}
+
+func findObject(pd *container, path string) (container, string) {
+	doc := *pd
+
+	split := strings.Split(path, "/")
+
+	if len(split) < 2 {
+		return nil, ""
+	}
+
+	parts := split[1 : len(split)-1]
+
+	key := split[len(split)-1]
+
+	var err error
+
+	for _, part := range parts {
+
+		next, ok := doc.get(decodePatchKey(part))
+
+		if next == nil || ok != nil {
+			return nil, ""
+		}
+
+		if isArray(*next.raw) {
+			doc, err = next.intoAry()
+
+			if err != nil {
+				return nil, ""
+			}
+		} else {
+			doc, err = next.intoDoc()
+
+			if err != nil {
+				return nil, ""
+			}
+		}
+	}
+
+	return doc, decodePatchKey(key)
+}
+
+func (d *partialDoc) set(key string, val *lazyNode) error {
+	(*d)[key] = val
+	return nil
+}
+
+func (d *partialDoc) add(key string, val *lazyNode) error {
+	(*d)[key] = val
+	return nil
+}
+
+func (d *partialDoc) get(key string) (*lazyNode, error) {
+	return (*d)[key], nil
+}
+
+func (d *partialDoc) remove(key string) error {
+	_, ok := (*d)[key]
+	if !ok {
+		return errors.Wrapf(ErrMissing, "Unable to remove nonexistent key: %s", key)
+	}
+
+	delete(*d, key)
+	return nil
+}
+
+// set should only be used to implement the "replace" operation, so "key" must
+// be an already existing index in "d".
+func (d *partialArray) set(key string, val *lazyNode) error {
+	idx, err := strconv.Atoi(key)
+	if err != nil {
+		return err
+	}
+	(*d)[idx] = val
+	return nil
+}
+
+func (d *partialArray) add(key string, val *lazyNode) error {
+	if key == "-" {
+		*d = append(*d, val)
+		return nil
+	}
+
+	idx, err := strconv.Atoi(key)
+	if err != nil {
+		return errors.Wrapf(err, "value was not a proper array index: '%s'", key)
+	}
+
+	sz := len(*d) + 1
+
+	ary := make([]*lazyNode, sz)
+
+	cur := *d
+
+	if idx >= len(ary) {
+		return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+	}
+
+	if SupportNegativeIndices {
+		if idx < -len(ary) {
+			return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+		}
+
+		if idx < 0 {
+			idx += len(ary)
+		}
+	}
+
+	copy(ary[0:idx], cur[0:idx])
+	ary[idx] = val
+	copy(ary[idx+1:], cur[idx:])
+
+	*d = ary
+	return nil
+}
+
+func (d *partialArray) get(key string) (*lazyNode, error) {
+	idx, err := strconv.Atoi(key)
+
+	if err != nil {
+		return nil, err
+	}
+
+	if idx >= len(*d) {
+		return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+	}
+
+	return (*d)[idx], nil
+}
+
+func (d *partialArray) remove(key string) error {
+	idx, err := strconv.Atoi(key)
+	if err != nil {
+		return err
+	}
+
+	cur := *d
+
+	if idx >= len(cur) {
+		return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+	}
+
+	if SupportNegativeIndices {
+		if idx < -len(cur) {
+			return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
+		}
+
+		if idx < 0 {
+			idx += len(cur)
+		}
+	}
+
+	ary := make([]*lazyNode, len(cur)-1)
+
+	copy(ary[0:idx], cur[0:idx])
+	copy(ary[idx:], cur[idx+1:])
+
+	*d = ary
+	return nil
+
+}
+
+func (p Patch) add(doc *container, op Operation) error {
+	path, err := op.Path()
+	if err != nil {
+		return errors.Wrapf(ErrMissing, "add operation failed to decode path")
+	}
+
+	con, key := findObject(doc, path)
+
+	if con == nil {
+		return errors.Wrapf(ErrMissing, "add operation does not apply: doc is missing path: \"%s\"", path)
+	}
+
+	err = con.add(key, op.value())
+	if err != nil {
+		return errors.Wrapf(err, "error in add for path: '%s'", path)
+	}
+
+	return nil
+}
+
+func (p Patch) remove(doc *container, op Operation) error {
+	path, err := op.Path()
+	if err != nil {
+		return errors.Wrapf(ErrMissing, "remove operation failed to decode path")
+	}
+
+	con, key := findObject(doc, path)
+
+	if con == nil {
+		return errors.Wrapf(ErrMissing, "remove operation does not apply: doc is missing path: \"%s\"", path)
+	}
+
+	err = con.remove(key)
+	if err != nil {
+		return errors.Wrapf(err, "error in remove for path: '%s'", path)
+	}
+
+	return nil
+}
+
+func (p Patch) replace(doc *container, op Operation) error {
+	path, err := op.Path()
+	if err != nil {
+		return errors.Wrapf(err, "replace operation failed to decode path")
+	}
+
+	con, key := findObject(doc, path)
+
+	if con == nil {
+		return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing path: %s", path)
+	}
+
+	_, ok := con.get(key)
+	if ok != nil {
+		return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing key: %s", path)
+	}
+
+	err = con.set(key, op.value())
+	if err != nil {
+		return errors.Wrapf(err, "error in remove for path: '%s'", path)
+	}
+
+	return nil
+}
+
+func (p Patch) move(doc *container, op Operation) error {
+	from, err := op.From()
+	if err != nil {
+		return errors.Wrapf(err, "move operation failed to decode from")
+	}
+
+	con, key := findObject(doc, from)
+
+	if con == nil {
+		return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing from path: %s", from)
+	}
+
+	val, err := con.get(key)
+	if err != nil {
+		return errors.Wrapf(err, "error in move for path: '%s'", key)
+	}
+
+	err = con.remove(key)
+	if err != nil {
+		return errors.Wrapf(err, "error in move for path: '%s'", key)
+	}
+
+	path, err := op.Path()
+	if err != nil {
+		return errors.Wrapf(err, "move operation failed to decode path")
+	}
+
+	con, key = findObject(doc, path)
+
+	if con == nil {
+		return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing destination path: %s", path)
+	}
+
+	err = con.add(key, val)
+	if err != nil {
+		return errors.Wrapf(err, "error in move for path: '%s'", path)
+	}
+
+	return nil
+}
+
+func (p Patch) test(doc *container, op Operation) error {
+	path, err := op.Path()
+	if err != nil {
+		return errors.Wrapf(err, "test operation failed to decode path")
+	}
+
+	con, key := findObject(doc, path)
+
+	if con == nil {
+		return errors.Wrapf(ErrMissing, "test operation does not apply: is missing path: %s", path)
+	}
+
+	val, err := con.get(key)
+	if err != nil {
+		return errors.Wrapf(err, "error in test for path: '%s'", path)
+	}
+
+	if val == nil {
+		if op.value().raw == nil {
+			return nil
+		}
+		return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
+	} else if op.value() == nil {
+		return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
+	}
+
+	if val.equal(op.value()) {
+		return nil
+	}
+
+	return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
+}
+
+func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) error {
+	from, err := op.From()
+	if err != nil {
+		return errors.Wrapf(err, "copy operation failed to decode from")
+	}
+
+	con, key := findObject(doc, from)
+
+	if con == nil {
+		return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing from path: %s", from)
+	}
+
+	val, err := con.get(key)
+	if err != nil {
+		return errors.Wrapf(err, "error in copy for from: '%s'", from)
+	}
+
+	path, err := op.Path()
+	if err != nil {
+		return errors.Wrapf(ErrMissing, "copy operation failed to decode path")
+	}
+
+	con, key = findObject(doc, path)
+
+	if con == nil {
+		return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing destination path: %s", path)
+	}
+
+	valCopy, sz, err := deepCopy(val)
+	if err != nil {
+		return errors.Wrapf(err, "error while performing deep copy")
+	}
+
+	(*accumulatedCopySize) += int64(sz)
+	if AccumulatedCopySizeLimit > 0 && *accumulatedCopySize > AccumulatedCopySizeLimit {
+		return NewAccumulatedCopySizeError(AccumulatedCopySizeLimit, *accumulatedCopySize)
+	}
+
+	err = con.add(key, valCopy)
+	if err != nil {
+		return errors.Wrapf(err, "error while adding value during copy")
+	}
+
+	return nil
+}
+
+// Equal indicates if 2 JSON documents are structurally equal.
+func Equal(a, b []byte) bool {
+	ra := make(json.RawMessage, len(a))
+	copy(ra, a)
+	la := newLazyNode(&ra)
+
+	rb := make(json.RawMessage, len(b))
+	copy(rb, b)
+	lb := newLazyNode(&rb)
+
+	return la.equal(lb)
+}
+
+// DecodePatch decodes the passed JSON document as an RFC 6902 patch.
+func DecodePatch(buf []byte) (Patch, error) {
+	var p Patch
+
+	err := json.Unmarshal(buf, &p)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return p, nil
+}
+
+// Apply mutates a JSON document according to the patch, and returns the new
+// document.
+func (p Patch) Apply(doc []byte) ([]byte, error) {
+	return p.ApplyIndent(doc, "")
+}
+
+// ApplyIndent mutates a JSON document according to the patch, and returns the new
+// document indented.
+func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) {
+	var pd container
+	if doc[0] == '[' {
+		pd = &partialArray{}
+	} else {
+		pd = &partialDoc{}
+	}
+
+	err := json.Unmarshal(doc, pd)
+
+	if err != nil {
+		return nil, err
+	}
+
+	err = nil
+
+	var accumulatedCopySize int64
+
+	for _, op := range p {
+		switch op.Kind() {
+		case "add":
+			err = p.add(&pd, op)
+		case "remove":
+			err = p.remove(&pd, op)
+		case "replace":
+			err = p.replace(&pd, op)
+		case "move":
+			err = p.move(&pd, op)
+		case "test":
+			err = p.test(&pd, op)
+		case "copy":
+			err = p.copy(&pd, op, &accumulatedCopySize)
+		default:
+			err = fmt.Errorf("Unexpected kind: %s", op.Kind())
+		}
+
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	if indent != "" {
+		return json.MarshalIndent(pd, "", indent)
+	}
+
+	return json.Marshal(pd)
+}
+
+// From http://tools.ietf.org/html/rfc6901#section-4 :
+//
+// Evaluation of each reference token begins by decoding any escaped
+// character sequence.  This is performed by first transforming any
+// occurrence of the sequence '~1' to '/', and then transforming any
+// occurrence of the sequence '~0' to '~'.
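+//
+// For example, the encoded reference token "a~1b~0c" decodes to "a/b~c".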
+
+var (
+	rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~")
+)
+
+func decodePatchKey(k string) string {
+	return rfc6901Decoder.Replace(k)
+}
diff --git a/vendor/github.com/exponent-io/jsonpath/.gitignore b/vendor/github.com/exponent-io/jsonpath/.gitignore
new file mode 100644
index 00000000..daf913b1
--- /dev/null
+++ b/vendor/github.com/exponent-io/jsonpath/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/vendor/github.com/exponent-io/jsonpath/.travis.yml b/vendor/github.com/exponent-io/jsonpath/.travis.yml
new file mode 100644
index 00000000..f4f458a4
--- /dev/null
+++ b/vendor/github.com/exponent-io/jsonpath/.travis.yml
@@ -0,0 +1,5 @@
+language: go
+
+go:
+  - 1.5
+  - tip
diff --git a/vendor/github.com/exponent-io/jsonpath/LICENSE b/vendor/github.com/exponent-io/jsonpath/LICENSE
new file mode 100644
index 00000000..54197725
--- /dev/null
+++ b/vendor/github.com/exponent-io/jsonpath/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Exponent Labs LLC
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/exponent-io/jsonpath/README.md b/vendor/github.com/exponent-io/jsonpath/README.md
new file mode 100644
index 00000000..382fb313
--- /dev/null
+++ b/vendor/github.com/exponent-io/jsonpath/README.md
@@ -0,0 +1,66 @@
+[![GoDoc](https://godoc.org/github.com/exponent-io/jsonpath?status.svg)](https://godoc.org/github.com/exponent-io/jsonpath)
+[![Build Status](https://travis-ci.org/exponent-io/jsonpath.svg?branch=master)](https://travis-ci.org/exponent-io/jsonpath)
+
+# jsonpath
+
+This package extends the [json.Decoder](https://golang.org/pkg/encoding/json/#Decoder) to support navigating a stream of JSON tokens. You should be able to use this extended Decoder in places where a json.Decoder would have been used.
+
+This Decoder has the following enhancements:
+ * The [Scan](https://godoc.org/github.com/exponent-io/jsonpath/#Decoder.Scan) method supports scanning a JSON stream while extracting particular values along the way using [PathActions](https://godoc.org/github.com/exponent-io/jsonpath#PathActions).
+ * The [SeekTo](https://godoc.org/github.com/exponent-io/jsonpath#Decoder.SeekTo) method supports seeking forward in a JSON token stream to a particular path.
+ * The [Path](https://godoc.org/github.com/exponent-io/jsonpath#Decoder.Path) method returns the path of the most recently parsed token.
+ * The [Token](https://godoc.org/github.com/exponent-io/jsonpath#Decoder.Token) method has been modified to distinguish between strings that are object keys and strings that are values. Object key strings are returned as the [KeyString](https://godoc.org/github.com/exponent-io/jsonpath#KeyString) type rather than a native string.
+
+## Installation
+
+    go get -u github.com/exponent-io/jsonpath
+
+## Example Usage
+
+#### SeekTo
+
+```go
+import "github.com/exponent-io/jsonpath"
+
+var j = []byte(`[
+  {"Space": "YCbCr", "Point": {"Y": 255, "Cb": 0, "Cr": -10}},
+  {"Space": "RGB",   "Point": {"R": 98, "G": 218, "B": 255}}
+]`)
+
+w := jsonpath.NewDecoder(bytes.NewReader(j))
+var v interface{}
+
+w.SeekTo(1, "Point", "G")
+w.Decode(&v) // v is 218
+```
+
+#### Scan with PathActions
+
+```go
+var j = []byte(`{"colors":[
+  {"Space": "YCbCr", "Point": {"Y": 255, "Cb": 0, "Cr": -10, "A": 58}},
+  {"Space": "RGB",   "Point": {"R": 98, "G": 218, "B": 255, "A": 231}}
+]}`)
+
+var actions PathActions
+
+// Extract the value at Point.A
+actions.Add(func(d *Decoder) error {
+  var alpha int
+  err := d.Decode(&alpha)
+  fmt.Printf("Alpha: %v\n", alpha)
+  return err
+}, "Point", "A")
+
+w := NewDecoder(bytes.NewReader(j))
+w.SeekTo("colors", 0)
+
+var ok = true
+var err error
+for ok {
+  ok, err = w.Scan(&actions)
+  if err != nil && err != io.EOF {
+    panic(err)
+  }
+}
+```
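+
+#### Token and KeyString
+
+A short sketch (illustrative), reusing the `j` document from the SeekTo example; object keys are returned as the `KeyString` type:
+
+```go
+w := NewDecoder(bytes.NewReader(j))
+
+for {
+  t, err := w.Token()
+  if err != nil {
+    break // io.EOF or a decode error
+  }
+  if key, ok := t.(KeyString); ok {
+    fmt.Println("key:", key)
+  }
+}
+```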
diff --git a/vendor/github.com/exponent-io/jsonpath/decoder.go b/vendor/github.com/exponent-io/jsonpath/decoder.go
new file mode 100644
index 00000000..31de46c7
--- /dev/null
+++ b/vendor/github.com/exponent-io/jsonpath/decoder.go
@@ -0,0 +1,210 @@
+package jsonpath
+
+import (
+	"encoding/json"
+	"io"
+)
+
+// KeyString is returned from Decoder.Token to represent each key in a JSON object value.
+type KeyString string
+
+// Decoder extends the Go runtime's encoding/json.Decoder to support navigating in a stream of JSON tokens.
+type Decoder struct {
+	json.Decoder
+
+	path    JsonPath
+	context jsonContext
+}
+
+// NewDecoder creates a new instance of the extended JSON Decoder.
+func NewDecoder(r io.Reader) *Decoder {
+	return &Decoder{Decoder: *json.NewDecoder(r)}
+}
+
+// SeekTo causes the Decoder to move forward to a given path in the JSON structure.
+//
+// The path argument must consist of strings or integers. Each string specifies a JSON object key, and
+// each integer specifies an index into a JSON array.
+//
+// Consider the JSON structure
+//
+//  { "a": [0,"s",12e4,{"b":0,"v":35} ] }
+//
+// SeekTo("a",3,"v") will move to the value referenced by the "a" key in the current object,
+// followed by a move to the 4th value (index 3) in the array, followed by a move to the value at key "v".
+// In this example, a subsequent call to the decoder's Decode() would unmarshal the value 35.
+//
+// SeekTo returns a boolean value indicating whether a match was found.
+//
+// Decoder is intended to be used with a stream of tokens. As a result it navigates forward only.
+func (d *Decoder) SeekTo(path ...interface{}) (bool, error) {
+
+	if len(path) == 0 {
+		return len(d.path) == 0, nil
+	}
+	last := len(path) - 1
+	if i, ok := path[last].(int); ok {
+		path[last] = i - 1
+	}
+
+	for {
+		if d.path.Equal(path) {
+			return true, nil
+		}
+		_, err := d.Token()
+		if err == io.EOF {
+			return false, nil
+		} else if err != nil {
+			return false, err
+		}
+	}
+}
+
+// Decode reads the next JSON-encoded value from its input and stores it in the value pointed to by v. This is
+// equivalent to Decode on the embedded encoding/json.Decoder.
+func (d *Decoder) Decode(v interface{}) error {
+	switch d.context {
+	case objValue:
+		d.context = objKey
+		break
+	case arrValue:
+		d.path.incTop()
+		break
+	}
+	return d.Decoder.Decode(v)
+}
+
+// Path returns a slice of string and/or int values representing the path from the root of the JSON object to the
+// position of the most-recently parsed token.
+func (d *Decoder) Path() JsonPath {
+	p := make(JsonPath, len(d.path))
+	copy(p, d.path)
+	return p
+}
+
+// Token is equivalent to the Token() method on json.Decoder. The primary difference is that it distinguishes
+// between strings that are keys and strings that are values. String tokens that are object keys are returned as a
+// KeyString rather than as a native string.
+func (d *Decoder) Token() (json.Token, error) {
+	t, err := d.Decoder.Token()
+	if err != nil {
+		return t, err
+	}
+
+	if t == nil {
+		switch d.context {
+		case objValue:
+			d.context = objKey
+			break
+		case arrValue:
+			d.path.incTop()
+			break
+		}
+		return t, err
+	}
+
+	switch t := t.(type) {
+	case json.Delim:
+		switch t {
+		case json.Delim('{'):
+			if d.context == arrValue {
+				d.path.incTop()
+			}
+			d.path.push("")
+			d.context = objKey
+			break
+		case json.Delim('}'):
+			d.path.pop()
+			d.context = d.path.inferContext()
+			break
+		case json.Delim('['):
+			if d.context == arrValue {
+				d.path.incTop()
+			}
+			d.path.push(-1)
+			d.context = arrValue
+			break
+		case json.Delim(']'):
+			d.path.pop()
+			d.context = d.path.inferContext()
+			break
+		}
+	case float64, json.Number, bool:
+		switch d.context {
+		case objValue:
+			d.context = objKey
+			break
+		case arrValue:
+			d.path.incTop()
+			break
+		}
+		break
+	case string:
+		switch d.context {
+		case objKey:
+			d.path.nameTop(t)
+			d.context = objValue
+			return KeyString(t), err
+		case objValue:
+			d.context = objKey
+		case arrValue:
+			d.path.incTop()
+		}
+		break
+	}
+
+	return t, err
+}
+
+// Scan moves forward over the JSON stream consuming all the tokens at the current level (current object, current array)
+// invoking each matching PathAction along the way.
+//
+// Scan returns true if there are more contiguous values to scan (for example in an array).
+func (d *Decoder) Scan(ext *PathActions) (bool, error) {
+
+	rootPath := d.Path()
+
+	// If this is an array path, increment the root path in our local copy.
+	if rootPath.inferContext() == arrValue {
+		rootPath.incTop()
+	}
+
+	for {
+		// advance the token position
+		_, err := d.Token()
+		if err != nil {
+			return false, err
+		}
+
+	match:
+		var relPath JsonPath
+
+		// capture the new JSON path
+		path := d.Path()
+
+		if len(path) > len(rootPath) {
+			// capture the path relative to where the scan started
+			relPath = path[len(rootPath):]
+		} else {
+			// if the path is not longer than the root, then we are done with this scan
+			// return boolean flag indicating if there are more items to scan at the same level
+			return d.Decoder.More(), nil
+		}
+
+		// match the relative path against the path actions
+		if node := ext.node.match(relPath); node != nil {
+			if node.action != nil {
+				// we have a match so execute the action
+				err = node.action(d)
+				if err != nil {
+					return d.Decoder.More(), err
+				}
+				// The action may have advanced the decoder. If we are in an array, advancing it further would
+				// skip tokens. So, if we are scanning an array, jump to the top without advancing the token.
+				if d.path.inferContext() == arrValue && d.Decoder.More() {
+					goto match
+				}
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/exponent-io/jsonpath/path.go b/vendor/github.com/exponent-io/jsonpath/path.go
new file mode 100644
index 00000000..d7db2ad3
--- /dev/null
+++ b/vendor/github.com/exponent-io/jsonpath/path.go
@@ -0,0 +1,67 @@
+// Package jsonpath extends the Go runtime's json.Decoder, enabling navigation of a stream of JSON tokens.
+package jsonpath
+
+import "fmt"
+
+type jsonContext int
+
+const (
+	none jsonContext = iota
+	objKey
+	objValue
+	arrValue
+)
+
+// AnyIndex can be used in a pattern to match any array index.
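+//
+// For example (illustrative), a PathActions pattern of ("colors", AnyIndex, "Point", "A")
+// matches the "Point.A" value of every element of the "colors" array.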
+const AnyIndex = -2
+
+// JsonPath is a slice of strings and/or integers. Each string specifies a JSON object key, and
+// each integer specifies an index into a JSON array.
+type JsonPath []interface{}
+
+func (p *JsonPath) push(n interface{}) { *p = append(*p, n) }
+func (p *JsonPath) pop()               { *p = (*p)[:len(*p)-1] }
+
+// increment the index at the top of the stack (must be an array index)
+func (p *JsonPath) incTop() { (*p)[len(*p)-1] = (*p)[len(*p)-1].(int) + 1 }
+
+// name the key at the top of the stack (must be an object key)
+func (p *JsonPath) nameTop(n string) { (*p)[len(*p)-1] = n }
+
+// infer the context from the item at the top of the stack
+func (p *JsonPath) inferContext() jsonContext {
+	if len(*p) == 0 {
+		return none
+	}
+	t := (*p)[len(*p)-1]
+	switch t.(type) {
+	case string:
+		return objKey
+	case int:
+		return arrValue
+	default:
+		panic(fmt.Sprintf("Invalid stack type %T", t))
+	}
+}
+
+// Equal tests for equality between two JsonPath types.
+func (p *JsonPath) Equal(o JsonPath) bool {
+	if len(*p) != len(o) {
+		return false
+	}
+	for i, v := range *p {
+		if v != o[i] {
+			return false
+		}
+	}
+	return true
+}
+
+func (p *JsonPath) HasPrefix(o JsonPath) bool {
+	for i, v := range o {
+		if v != (*p)[i] {
+			return false
+		}
+	}
+	return true
+}
diff --git a/vendor/github.com/exponent-io/jsonpath/pathaction.go b/vendor/github.com/exponent-io/jsonpath/pathaction.go
new file mode 100644
index 00000000..497ed686
--- /dev/null
+++ b/vendor/github.com/exponent-io/jsonpath/pathaction.go
@@ -0,0 +1,61 @@
+package jsonpath
+
+// pathNode is used to construct a trie of paths to be matched
+type pathNode struct {
+	matchOn    interface{} // string, or integer
+	childNodes []pathNode
+	action     DecodeAction
+}
+
+// match climbs the trie to find a node that matches the given JSON path.
+func (n *pathNode) match(path JsonPath) *pathNode {
+	var node *pathNode = n
+	for _, ps := range path {
+		found := false
+		for i, n := range node.childNodes {
+			if n.matchOn == ps {
+				node = &node.childNodes[i]
+				found = true
+				break
+			} else if _, ok := ps.(int); ok && n.matchOn == AnyIndex {
+				node = &node.childNodes[i]
+				found = true
+				break
+			}
+		}
+		if !found {
+			return nil
+		}
+	}
+	return node
+}
+
+// PathActions represents a collection of DecodeAction functions that should be called at certain path positions
+// when scanning the JSON stream. PathActions can be created once and used many times in one or more JSON streams.
+type PathActions struct {
+	node pathNode
+}
+
+// DecodeAction handlers are called by the Decoder when scanning objects. See PathActions.Add for more detail.
+type DecodeAction func(d *Decoder) error
+
+// Add specifies an action to call on the Decoder when the specified path is encountered.
+func (je *PathActions) Add(action DecodeAction, path ...interface{}) {
+
+	var node *pathNode = &je.node
+	for _, ps := range path {
+		found := false
+		for i, n := range node.childNodes {
+			if n.matchOn == ps {
+				node = &node.childNodes[i]
+				found = true
+				break
+			}
+		}
+		if !found {
+			node.childNodes = append(node.childNodes, pathNode{matchOn: ps})
+			node = &node.childNodes[len(node.childNodes)-1]
+		}
+	}
+	node.action = action
+}
diff --git a/vendor/github.com/go-openapi/jsonpointer/.editorconfig b/vendor/github.com/go-openapi/jsonpointer/.editorconfig
new file mode 100644
index 00000000..3152da69
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonpointer/.editorconfig
@@ -0,0 +1,26 @@
+# top-most EditorConfig file
+root = true
+
+# Unix-style newlines with a newline ending every file
+[*]
+end_of_line = lf
+insert_final_newline = true
+indent_style = space
+indent_size = 2
+trim_trailing_whitespace = true
+
+# Set default charset
+[*.{js,py,go,scala,rb,java,html,css,less,sass,md}]
+charset = utf-8
+
+# Tab indentation (no size specified)
+[*.go]
+indent_style = tab
+
+[*.md]
+trim_trailing_whitespace = false
+
+# Matches the exact files either package.json or .travis.yml
+[{package.json,.travis.yml}]
+indent_style = space
+indent_size = 2
diff --git a/vendor/github.com/go-openapi/jsonpointer/.gitignore b/vendor/github.com/go-openapi/jsonpointer/.gitignore
new file mode 100644
index 00000000..769c2440
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonpointer/.gitignore
@@ -0,0 +1 @@
+secrets.yml
diff --git a/vendor/github.com/go-openapi/jsonpointer/.travis.yml b/vendor/github.com/go-openapi/jsonpointer/.travis.yml
new file mode 100644
index 00000000..9aef9184
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonpointer/.travis.yml
@@ -0,0 +1,15 @@
+after_success:
+- bash <(curl -s https://codecov.io/bash)
+go:
+- 1.11.x
+- 1.12.x
+install:
+- GO111MODULE=off go get -u gotest.tools/gotestsum
+env:
+- GO111MODULE=on
+language: go
+notifications:
+  slack:
+    secure: a5VgoiwB1G/AZqzmephPZIhEB9avMlsWSlVnM1dSAtYAwdrQHGTQxAmpOxYIoSPDhWNN5bfZmjd29++UlTwLcHSR+e0kJhH6IfDlsHj/HplNCJ9tyI0zYc7XchtdKgeMxMzBKCzgwFXGSbQGydXTliDNBo0HOzmY3cou/daMFTP60K+offcjS+3LRAYb1EroSRXZqrk1nuF/xDL3792DZUdPMiFR/L/Df6y74D6/QP4sTkTDFQitz4Wy/7jbsfj8dG6qK2zivgV6/l+w4OVjFkxVpPXogDWY10vVXNVynqxfJ7to2d1I9lNCHE2ilBCkWMIPdyJF7hjF8pKW+82yP4EzRh0vu8Xn0HT5MZpQxdRY/YMxNrWaG7SxsoEaO4q5uhgdzAqLYY3TRa7MjIK+7Ur+aqOeTXn6OKwVi0CjvZ6mIU3WUKSwiwkFZMbjRAkSb5CYwMEfGFO/z964xz83qGt6WAtBXNotqCQpTIiKtDHQeLOMfksHImCg6JLhQcWBVxamVgu0G3Pdh8Y6DyPnxraXY95+QDavbjqv7TeYT9T/FNnrkXaTTK0s4iWE5H4ACU0Qvz0wUYgfQrZv0/Hp7V17+rabUwnzYySHCy9SWX/7OV9Cfh31iMp9ZIffr76xmmThtOEqs8TrTtU6BWI3rWwvA9cXQipZTVtL0oswrGw=
+script:
+- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./...
diff --git a/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md
new file mode 100644
index 00000000..9322b065
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md
@@ -0,0 +1,74 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, gender identity and expression, level of experience,
+nationality, personal appearance, race, religion, or sexual identity and
+orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or
+advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic
+  address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+  professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct, or to ban temporarily or
+permanently any contributor for other behaviors that they deem inappropriate,
+threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an appointed
+representative at an online or offline event. Representation of a project may be
+further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the project team at ivan+abuse@flanders.co.nz. All
+complaints will be reviewed and investigated and will result in a response that
+is deemed necessary and appropriate to the circumstances. The project team is
+obligated to maintain confidentiality with regard to the reporter of an incident.
+Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
+available at [http://contributor-covenant.org/version/1/4][version]
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/
diff --git a/vendor/github.com/go-openapi/jsonpointer/LICENSE b/vendor/github.com/go-openapi/jsonpointer/LICENSE
new file mode 100644
index 00000000..d6456956
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonpointer/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/go-openapi/jsonpointer/README.md b/vendor/github.com/go-openapi/jsonpointer/README.md
new file mode 100644
index 00000000..813788af
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonpointer/README.md
@@ -0,0 +1,15 @@
+# gojsonpointer [![Build Status](https://travis-ci.org/go-openapi/jsonpointer.svg?branch=master)](https://travis-ci.org/go-openapi/jsonpointer) [![codecov](https://codecov.io/gh/go-openapi/jsonpointer/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonpointer) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
+
+[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonpointer/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/jsonpointer?status.svg)](http://godoc.org/github.com/go-openapi/jsonpointer)
+An implementation of JSON Pointer - Go language
+
+## Status
+Completed YES
+
+Tested YES
+
+## References
+http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07
+
+### Note
+Section 4 (Evaluation) of the reference above, starting with 'If the currently referenced value is a JSON array, the reference token MUST contain either...', is not implemented.
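+
+## Example
+
+A minimal sketch (illustrative) of resolving a pointer against a decoded document:
+
+```go
+import "github.com/go-openapi/jsonpointer"
+
+var doc interface{} = map[string]interface{}{
+	"foo": []interface{}{"bar", "baz"},
+}
+
+p, _ := jsonpointer.New("/foo/0")
+v, _, _ := p.Get(doc) // v == "bar"
+```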
diff --git a/vendor/github.com/go-openapi/jsonpointer/go.mod b/vendor/github.com/go-openapi/jsonpointer/go.mod
new file mode 100644
index 00000000..3e45e225
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonpointer/go.mod
@@ -0,0 +1,9 @@
+module github.com/go-openapi/jsonpointer
+
+require (
+	github.com/go-openapi/swag v0.19.5
+	github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e // indirect
+	github.com/stretchr/testify v1.3.0
+)
+
+go 1.13
diff --git a/vendor/github.com/go-openapi/jsonpointer/go.sum b/vendor/github.com/go-openapi/jsonpointer/go.sum
new file mode 100644
index 00000000..953d4f35
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonpointer/go.sum
@@ -0,0 +1,24 @@
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY=
+github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4=
+github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8=
+github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/go-openapi/jsonpointer/pointer.go b/vendor/github.com/go-openapi/jsonpointer/pointer.go
new file mode 100644
index 00000000..b284eb77
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonpointer/pointer.go
@@ -0,0 +1,390 @@
+// Copyright 2013 sigu-399 ( https://github.com/sigu-399 )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author       sigu-399
+// author-github  https://github.com/sigu-399
+// author-mail    sigu.399@gmail.com
+//
+// repository-name  jsonpointer
+// repository-desc  An implementation of JSON Pointer - Go language
+//
+// description    Main and unique file.
+//
+// created        25-02-2013
+
+package jsonpointer
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+
+	"github.com/go-openapi/swag"
+)
+
+const (
+	emptyPointer     = ``
+	pointerSeparator = `/`
+
+	invalidStart = `JSON pointer must be empty or start with a "` + pointerSeparator
+)
+
+var jsonPointableType = reflect.TypeOf(new(JSONPointable)).Elem()
+var jsonSetableType = reflect.TypeOf(new(JSONSetable)).Elem()
+
+// JSONPointable is an interface for structs to implement when they need to customize the
+// json pointer process
+type JSONPointable interface {
+	JSONLookup(string) (interface{}, error)
+}
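+
+// For example (illustrative), a hypothetical Metadata type could expose its labels
+// to pointer lookups:
+//
+//	type Metadata struct{ Labels map[string]string }
+//
+//	func (m Metadata) JSONLookup(token string) (interface{}, error) {
+//		return m.Labels[token], nil
+//	}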
+
+// JSONSetable is an interface for structs to implement when they need to customize the
+// json pointer process
+type JSONSetable interface {
+	JSONSet(string, interface{}) error
+}
+
+// New creates a new json pointer for the given string
+func New(jsonPointerString string) (Pointer, error) {
+
+	var p Pointer
+	err := p.parse(jsonPointerString)
+	return p, err
+
+}
+
+// Pointer is the JSON pointer representation
+type Pointer struct {
+	referenceTokens []string
+}
+
+// "Constructor", parses the given string JSON pointer
+func (p *Pointer) parse(jsonPointerString string) error {
+
+	var err error
+
+	if jsonPointerString != emptyPointer {
+		if !strings.HasPrefix(jsonPointerString, pointerSeparator) {
+			err = errors.New(invalidStart)
+		} else {
+			referenceTokens := strings.Split(jsonPointerString, pointerSeparator)
+			for _, referenceToken := range referenceTokens[1:] {
+				p.referenceTokens = append(p.referenceTokens, referenceToken)
+			}
+		}
+	}
+
+	return err
+}
+
+// Get uses the pointer to retrieve a value from a JSON document
+func (p *Pointer) Get(document interface{}) (interface{}, reflect.Kind, error) {
+	return p.get(document, swag.DefaultJSONNameProvider)
+}
+
+// Set uses the pointer to set a value in a JSON document
+func (p *Pointer) Set(document interface{}, value interface{}) (interface{}, error) {
+	return document, p.set(document, value, swag.DefaultJSONNameProvider)
+}
+
+// GetForToken gets a value for a json pointer token 1 level deep
+func GetForToken(document interface{}, decodedToken string) (interface{}, reflect.Kind, error) {
+	return getSingleImpl(document, decodedToken, swag.DefaultJSONNameProvider)
+}
+
+// SetForToken sets a value for a json pointer token 1 level deep
+func SetForToken(document interface{}, decodedToken string, value interface{}) (interface{}, error) {
+	return document, setSingleImpl(document, value, decodedToken, swag.DefaultJSONNameProvider)
+}
+
+func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) {
+	rValue := reflect.Indirect(reflect.ValueOf(node))
+	kind := rValue.Kind()
+
+	switch kind {
+
+	case reflect.Struct:
+		if rValue.Type().Implements(jsonPointableType) {
+			r, err := node.(JSONPointable).JSONLookup(decodedToken)
+			if err != nil {
+				return nil, kind, err
+			}
+			return r, kind, nil
+		}
+		nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
+		if !ok {
+			return nil, kind, fmt.Errorf("object has no field %q", decodedToken)
+		}
+		fld := rValue.FieldByName(nm)
+		return fld.Interface(), kind, nil
+
+	case reflect.Map:
+		kv := reflect.ValueOf(decodedToken)
+		mv := rValue.MapIndex(kv)
+
+		if mv.IsValid() {
+			return mv.Interface(), kind, nil
+		}
+		return nil, kind, fmt.Errorf("object has no key %q", decodedToken)
+
+	case reflect.Slice:
+		tokenIndex, err := strconv.Atoi(decodedToken)
+		if err != nil {
+			return nil, kind, err
+		}
+		sLength := rValue.Len()
+		if tokenIndex < 0 || tokenIndex >= sLength {
+			return nil, kind, fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength-1, tokenIndex)
+		}
+
+		elem := rValue.Index(tokenIndex)
+		return elem.Interface(), kind, nil
+
+	default:
+		return nil, kind, fmt.Errorf("invalid token reference %q", decodedToken)
+	}
+
+}
+
+func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *swag.NameProvider) error {
+	rValue := reflect.Indirect(reflect.ValueOf(node))
+	switch rValue.Kind() {
+
+	case reflect.Struct:
+		if ns, ok := node.(JSONSetable); ok { // pointer impl
+			return ns.JSONSet(decodedToken, data)
+		}
+
+		if rValue.Type().Implements(jsonSetableType) {
+			return node.(JSONSetable).JSONSet(decodedToken, data)
+		}
+
+		nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
+		if !ok {
+			return fmt.Errorf("object has no field %q", decodedToken)
+		}
+		fld := rValue.FieldByName(nm)
+		if fld.IsValid() {
+			fld.Set(reflect.ValueOf(data))
+		}
+		return nil
+
+	case reflect.Map:
+		kv := reflect.ValueOf(decodedToken)
+		rValue.SetMapIndex(kv, reflect.ValueOf(data))
+		return nil
+
+	case reflect.Slice:
+		tokenIndex, err := strconv.Atoi(decodedToken)
+		if err != nil {
+			return err
+		}
+		sLength := rValue.Len()
+		if tokenIndex < 0 || tokenIndex >= sLength {
+			return fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength, tokenIndex)
+		}
+
+		elem := rValue.Index(tokenIndex)
+		if !elem.CanSet() {
+			return fmt.Errorf("can't set slice index %s to %v", decodedToken, data)
+		}
+		elem.Set(reflect.ValueOf(data))
+		return nil
+
+	default:
+		return fmt.Errorf("invalid token reference %q", decodedToken)
+	}
+
+}
+
+func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) {
+
+	if nameProvider == nil {
+		nameProvider = swag.DefaultJSONNameProvider
+	}
+
+	kind := reflect.Invalid
+
+	// Full document when empty
+	if len(p.referenceTokens) == 0 {
+		return node, kind, nil
+	}
+
+	for _, token := range p.referenceTokens {
+
+		decodedToken := Unescape(token)
+
+		r, knd, err := getSingleImpl(node, decodedToken, nameProvider)
+		if err != nil {
+			return nil, knd, err
+		}
+		node, kind = r, knd
+
+	}
+
+	rValue := reflect.ValueOf(node)
+	kind = rValue.Kind()
+
+	return node, kind, nil
+}
+
+func (p *Pointer) set(node, data interface{}, nameProvider *swag.NameProvider) error {
+	knd := reflect.ValueOf(node).Kind()
+
+	if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array {
+		return fmt.Errorf("only structs, pointers, maps and slices are supported for setting values")
+	}
+
+	if nameProvider == nil {
+		nameProvider = swag.DefaultJSONNameProvider
+	}
+
+	// Full document when empty
+	if len(p.referenceTokens) == 0 {
+		return nil
+	}
+
+	lastI := len(p.referenceTokens) - 1
+	for i, token := range p.referenceTokens {
+		isLastToken := i == lastI
+		decodedToken := Unescape(token)
+
+		if isLastToken {
+
+			return setSingleImpl(node, data, decodedToken, nameProvider)
+		}
+
+		rValue := reflect.Indirect(reflect.ValueOf(node))
+		kind := rValue.Kind()
+
+		switch kind {
+
+		case reflect.Struct:
+			if rValue.Type().Implements(jsonPointableType) {
+				r, err := node.(JSONPointable).JSONLookup(decodedToken)
+				if err != nil {
+					return err
+				}
+				fld := reflect.ValueOf(r)
+				if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Ptr {
+					node = fld.Addr().Interface()
+					continue
+				}
+				node = r
+				continue
+			}
+			nm, ok := nameProvider.GetGoNameForType(rValue.Type(), decodedToken)
+			if !ok {
+				return fmt.Errorf("object has no field %q", decodedToken)
+			}
+			fld := rValue.FieldByName(nm)
+			if fld.CanAddr() && fld.Kind() != reflect.Interface && fld.Kind() != reflect.Map && fld.Kind() != reflect.Slice && fld.Kind() != reflect.Ptr {
+				node = fld.Addr().Interface()
+				continue
+			}
+			node = fld.Interface()
+
+		case reflect.Map:
+			kv := reflect.ValueOf(decodedToken)
+			mv := rValue.MapIndex(kv)
+
+			if !mv.IsValid() {
+				return fmt.Errorf("object has no key %q", decodedToken)
+			}
+			if mv.CanAddr() && mv.Kind() != reflect.Interface && mv.Kind() != reflect.Map && mv.Kind() != reflect.Slice && mv.Kind() != reflect.Ptr {
+				node = mv.Addr().Interface()
+				continue
+			}
+			node = mv.Interface()
+
+		case reflect.Slice:
+			tokenIndex, err := strconv.Atoi(decodedToken)
+			if err != nil {
+				return err
+			}
+			sLength := rValue.Len()
+			if tokenIndex < 0 || tokenIndex >= sLength {
+				return fmt.Errorf("index out of bounds array[0,%d] index '%d'", sLength, tokenIndex)
+			}
+
+			elem := rValue.Index(tokenIndex)
+			if elem.CanAddr() && elem.Kind() != reflect.Interface && elem.Kind() != reflect.Map && elem.Kind() != reflect.Slice && elem.Kind() != reflect.Ptr {
+				node = elem.Addr().Interface()
+				continue
+			}
+			node = elem.Interface()
+
+		default:
+			return fmt.Errorf("invalid token reference %q", decodedToken)
+		}
+
+	}
+
+	return nil
+}
+
+// DecodedTokens returns the decoded tokens
+func (p *Pointer) DecodedTokens() []string {
+	result := make([]string, 0, len(p.referenceTokens))
+	for _, t := range p.referenceTokens {
+		result = append(result, Unescape(t))
+	}
+	return result
+}
+
+// IsEmpty returns true if this is an empty JSON pointer;
+// an empty pointer refers to the root document
+func (p *Pointer) IsEmpty() bool {
+	return len(p.referenceTokens) == 0
+}
+
+// String returns the string representation of the pointer
+func (p *Pointer) String() string {
+
+	if len(p.referenceTokens) == 0 {
+		return emptyPointer
+	}
+
+	pointerString := pointerSeparator + strings.Join(p.referenceTokens, pointerSeparator)
+
+	return pointerString
+}
+
+// Specific JSON pointer encoding here
+// ~0 => ~
+// ~1 => /
+// ... and vice versa
+
+const (
+	encRefTok0 = `~0`
+	encRefTok1 = `~1`
+	decRefTok0 = `~`
+	decRefTok1 = `/`
+)
+
+// Unescape unescapes a json pointer reference token string to the original representation
+func Unescape(token string) string {
+	step1 := strings.Replace(token, encRefTok1, decRefTok1, -1)
+	step2 := strings.Replace(step1, encRefTok0, decRefTok0, -1)
+	return step2
+}
+
+// Escape escapes a pointer reference token string
+func Escape(token string) string {
+	step1 := strings.Replace(token, decRefTok0, encRefTok0, -1)
+	step2 := strings.Replace(step1, decRefTok1, encRefTok1, -1)
+	return step2
+}
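+
+// For example (illustrative): Escape("a/b~c") returns "a~1b~0c",
+// and Unescape("a~1b~0c") returns "a/b~c".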
diff --git a/vendor/github.com/go-openapi/jsonreference/.gitignore b/vendor/github.com/go-openapi/jsonreference/.gitignore
new file mode 100644
index 00000000..769c2440
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonreference/.gitignore
@@ -0,0 +1 @@
+secrets.yml
diff --git a/vendor/github.com/go-openapi/jsonreference/.travis.yml b/vendor/github.com/go-openapi/jsonreference/.travis.yml
new file mode 100644
index 00000000..40b90757
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonreference/.travis.yml
@@ -0,0 +1,15 @@
+after_success:
+- bash <(curl -s https://codecov.io/bash)
+go:
+- 1.11.x
+- 1.12.x
+install:
+- GO111MODULE=off go get -u gotest.tools/gotestsum
+env:
+- GO111MODULE=on
+language: go
+notifications:
+  slack:
+    secure: OpQG/36F7DSF00HLm9WZMhyqFCYYyYTsVDObW226cWiR8PWYiNfLZiSEvIzT1Gx4dDjhigKTIqcLhG34CkL5iNXDjm9Yyo2RYhQPlK8NErNqUEXuBqn4RqYHW48VGhEhOyDd4Ei0E2FN5ZbgpvHgtpkdZ6XDi64r3Ac89isP9aPHXQTuv2Jog6b4/OKKiUTftLcTIst0p4Cp3gqOJWf1wnoj+IadWiECNVQT6zb47IYjtyw6+uV8iUjTzdKcRB6Zc6b4Dq7JAg1Zd7Jfxkql3hlKp4PNlRf9Cy7y5iA3G7MLyg3FcPX5z2kmcyPt2jOTRMBWUJ5zIQpOxizAcN8WsT3WWBL5KbuYK6k0PzujrIDLqdxGpNmjkkMfDBT9cKmZpm2FdW+oZgPFJP+oKmAo4u4KJz/vjiPTXgQlN5bmrLuRMCp+AwC5wkIohTqWZVPE2TK6ZSnMYcg/W39s+RP/9mJoyryAvPSpBOLTI+biCgaUCTOAZxNTWpMFc3tPYntc41WWkdKcooZ9JA5DwfcaVFyTGQ3YXz+HvX6G1z/gW0Q/A4dBi9mj2iE1xm7tRTT+4VQ2AXFvSEI1HJpfPgYnwAtwOD1v3Qm2EUHk9sCdtEDR4wVGEPIVn44GnwFMnGKx9JWppMPYwFu3SVDdHt+E+LOlhZUply11Aa+IVrT2KUQ=
+script:
+- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./...
diff --git a/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md
new file mode 100644
index 00000000..9322b065
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md
@@ -0,0 +1,74 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, gender identity and expression, level of experience,
+nationality, personal appearance, race, religion, or sexual identity and
+orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or
+advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic
+  address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+  professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct, or to ban temporarily or
+permanently any contributor for other behaviors that they deem inappropriate,
+threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an appointed
+representative at an online or offline event. Representation of a project may be
+further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the project team at ivan+abuse@flanders.co.nz. All
+complaints will be reviewed and investigated and will result in a response that
+is deemed necessary and appropriate to the circumstances. The project team is
+obligated to maintain confidentiality with regard to the reporter of an incident.
+Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
+available at [http://contributor-covenant.org/version/1/4][version]
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/
diff --git a/vendor/github.com/go-openapi/jsonreference/LICENSE b/vendor/github.com/go-openapi/jsonreference/LICENSE
new file mode 100644
index 00000000..d6456956
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonreference/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/go-openapi/jsonreference/README.md b/vendor/github.com/go-openapi/jsonreference/README.md
new file mode 100644
index 00000000..66345f4c
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonreference/README.md
@@ -0,0 +1,15 @@
+# gojsonreference [![Build Status](https://travis-ci.org/go-openapi/jsonreference.svg?branch=master)](https://travis-ci.org/go-openapi/jsonreference) [![codecov](https://codecov.io/gh/go-openapi/jsonreference/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonreference) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
+
+[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonreference/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/jsonreference?status.svg)](http://godoc.org/github.com/go-openapi/jsonreference)
+An implementation of JSON Reference - Go language
+
+## Status
+Work in progress ( 90% done )
+
+## Dependencies
+https://github.com/go-openapi/jsonpointer
+
+## References
+http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07
+
+http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03
diff --git a/vendor/github.com/go-openapi/jsonreference/go.mod b/vendor/github.com/go-openapi/jsonreference/go.mod
new file mode 100644
index 00000000..aff1d016
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonreference/go.mod
@@ -0,0 +1,12 @@
+module github.com/go-openapi/jsonreference
+
+require (
+	github.com/PuerkitoBio/purell v1.1.1
+	github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
+	github.com/go-openapi/jsonpointer v0.19.3
+	github.com/stretchr/testify v1.3.0
+	golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 // indirect
+	golang.org/x/text v0.3.2 // indirect
+)
+
+go 1.13
diff --git a/vendor/github.com/go-openapi/jsonreference/go.sum b/vendor/github.com/go-openapi/jsonreference/go.sum
new file mode 100644
index 00000000..c7ceab58
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonreference/go.sum
@@ -0,0 +1,44 @@
+github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
+github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
+github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/go-openapi/jsonpointer v0.19.2 h1:A9+F4Dc/MCNB5jibxf6rRvOvR/iFgQdyNx9eIhnGqq0=
+github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
+github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w=
+github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/swag v0.19.2 h1:jvO6bCMBEilGwMfHhrd61zIID4oIFdwb76V17SM88dE=
+github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY=
+github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4=
+github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8=
+github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM=
+golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/go-openapi/jsonreference/reference.go b/vendor/github.com/go-openapi/jsonreference/reference.go
new file mode 100644
index 00000000..3bc0a6e2
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonreference/reference.go
@@ -0,0 +1,156 @@
+// Copyright 2013 sigu-399 ( https://github.com/sigu-399 )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author       sigu-399
+// author-github  https://github.com/sigu-399
+// author-mail    sigu.399@gmail.com
+//
+// repository-name  jsonreference
+// repository-desc  An implementation of JSON Reference - Go language
+//
+// description    Main and unique file.
+//
+// created        26-02-2013
+
+package jsonreference
+
+import (
+	"errors"
+	"net/url"
+	"strings"
+
+	"github.com/PuerkitoBio/purell"
+	"github.com/go-openapi/jsonpointer"
+)
+
+const (
+	fragmentRune = `#`
+)
+
+// New creates a new reference for the given string
+func New(jsonReferenceString string) (Ref, error) {
+
+	var r Ref
+	err := r.parse(jsonReferenceString)
+	return r, err
+
+}
+
+// MustCreateRef parses the ref string and panics when it's invalid.
+// Use the New method for a version that returns an error
+func MustCreateRef(ref string) Ref {
+	r, err := New(ref)
+	if err != nil {
+		panic(err)
+	}
+	return r
+}
+
+// Ref represents a json reference object
+type Ref struct {
+	referenceURL     *url.URL
+	referencePointer jsonpointer.Pointer
+
+	HasFullURL      bool
+	HasURLPathOnly  bool
+	HasFragmentOnly bool
+	HasFileScheme   bool
+	HasFullFilePath bool
+}
+
+// GetURL gets the URL for this reference
+func (r *Ref) GetURL() *url.URL {
+	return r.referenceURL
+}
+
+// GetPointer gets the json pointer for this reference
+func (r *Ref) GetPointer() *jsonpointer.Pointer {
+	return &r.referencePointer
+}
+
+// String returns the best version of the url for this reference
+func (r *Ref) String() string {
+
+	if r.referenceURL != nil {
+		return r.referenceURL.String()
+	}
+
+	if r.HasFragmentOnly {
+		return fragmentRune + r.referencePointer.String()
+	}
+
+	return r.referencePointer.String()
+}
+
+// IsRoot returns true if this reference is a root document
+func (r *Ref) IsRoot() bool {
+	return r.referenceURL != nil &&
+		!r.IsCanonical() &&
+		!r.HasURLPathOnly &&
+		r.referenceURL.Fragment == ""
+}
+
+// IsCanonical returns true when this pointer starts with http(s):// or file://
+func (r *Ref) IsCanonical() bool {
+	return (r.HasFileScheme && r.HasFullFilePath) || (!r.HasFileScheme && r.HasFullURL)
+}
+
+// "Constructor", parses the given string JSON reference
+func (r *Ref) parse(jsonReferenceString string) error {
+
+	parsed, err := url.Parse(jsonReferenceString)
+	if err != nil {
+		return err
+	}
+
+	r.referenceURL, _ = url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes))
+	refURL := r.referenceURL
+
+	if refURL.Scheme != "" && refURL.Host != "" {
+		r.HasFullURL = true
+	} else {
+		if refURL.Path != "" {
+			r.HasURLPathOnly = true
+		} else if refURL.RawQuery == "" && refURL.Fragment != "" {
+			r.HasFragmentOnly = true
+		}
+	}
+
+	r.HasFileScheme = refURL.Scheme == "file"
+	r.HasFullFilePath = strings.HasPrefix(refURL.Path, "/")
+
+	// invalid json-pointer error means url has no json-pointer fragment. simply ignore error
+	r.referencePointer, _ = jsonpointer.New(refURL.Fragment)
+
+	return nil
+}
+
+// Inherits creates a new reference from a parent and a child
+// If the child cannot inherit from the parent, an error is returned
+func (r *Ref) Inherits(child Ref) (*Ref, error) {
+	childURL := child.GetURL()
+	parentURL := r.GetURL()
+	if childURL == nil {
+		return nil, errors.New("child url is nil")
+	}
+	if parentURL == nil {
+		return &child, nil
+	}
+
+	ref, err := New(parentURL.ResolveReference(childURL).String())
+	if err != nil {
+		return nil, err
+	}
+	return &ref, nil
+}
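
As a quick orientation to the vendored API above, here is a minimal, illustrative sketch (the reference string and base URL are made up) showing how `New`, `MustCreateRef`, `GetPointer`, and `Inherits` fit together:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/jsonreference"
)

func main() {
	// A fragment-only reference: nothing but a JSON Pointer.
	child := jsonreference.MustCreateRef("#/definitions/Pet")
	fmt.Println(child.HasFragmentOnly)       // true
	fmt.Println(child.GetPointer().String()) // /definitions/Pet

	// A full-URL reference to a parent document.
	parent, err := jsonreference.New("https://example.com/api/swagger.json")
	if err != nil {
		panic(err)
	}

	// Inherits resolves the child against the parent, RFC 3986 style.
	resolved, err := parent.Inherits(child)
	if err != nil {
		panic(err)
	}
	fmt.Println(resolved.String()) // https://example.com/api/swagger.json#/definitions/Pet
}
```

A fragment-only reference carries just a JSON Pointer; `Inherits` resolves it against the URL of a parent document to produce an absolute reference.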
diff --git a/vendor/github.com/go-openapi/spec/.editorconfig b/vendor/github.com/go-openapi/spec/.editorconfig
new file mode 100644
index 00000000..3152da69
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/.editorconfig
@@ -0,0 +1,26 @@
+# top-most EditorConfig file
+root = true
+
+# Unix-style newlines with a newline ending every file
+[*]
+end_of_line = lf
+insert_final_newline = true
+indent_style = space
+indent_size = 2
+trim_trailing_whitespace = true
+
+# Set default charset
+[*.{js,py,go,scala,rb,java,html,css,less,sass,md}]
+charset = utf-8
+
+# Tab indentation (no size specified)
+[*.go]
+indent_style = tab
+
+[*.md]
+trim_trailing_whitespace = false
+
+# Matches the exact files either package.json or .travis.yml
+[{package.json,.travis.yml}]
+indent_style = space
+indent_size = 2
diff --git a/vendor/github.com/go-openapi/spec/.gitignore b/vendor/github.com/go-openapi/spec/.gitignore
new file mode 100644
index 00000000..dd91ed6a
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/.gitignore
@@ -0,0 +1,2 @@
+secrets.yml
+coverage.out
diff --git a/vendor/github.com/go-openapi/spec/.golangci.yml b/vendor/github.com/go-openapi/spec/.golangci.yml
new file mode 100644
index 00000000..3e33f9f2
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/.golangci.yml
@@ -0,0 +1,23 @@
+linters-settings:
+  govet:
+    check-shadowing: true
+  golint:
+    min-confidence: 0
+  gocyclo:
+    min-complexity: 45
+  maligned:
+    suggest-new: true
+  dupl:
+    threshold: 200
+  goconst:
+    min-len: 2
+    min-occurrences: 2
+
+linters:
+  enable-all: true
+  disable:
+    - maligned
+    - unparam
+    - lll
+    - gochecknoinits
+    - gochecknoglobals
diff --git a/vendor/github.com/go-openapi/spec/.travis.yml b/vendor/github.com/go-openapi/spec/.travis.yml
new file mode 100644
index 00000000..aa26d876
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/.travis.yml
@@ -0,0 +1,15 @@
+after_success:
+- bash <(curl -s https://codecov.io/bash)
+go:
+- 1.11.x
+- 1.12.x
+install:
+- GO111MODULE=off go get -u gotest.tools/gotestsum
+env:
+- GO111MODULE=on
+language: go
+notifications:
+  slack:
+    secure: QUWvCkBBK09GF7YtEvHHVt70JOkdlNBG0nIKu/5qc4/nW5HP8I2w0SEf/XR2je0eED1Qe3L/AfMCWwrEj+IUZc3l4v+ju8X8R3Lomhme0Eb0jd1MTMCuPcBT47YCj0M7RON7vXtbFfm1hFJ/jLe5+9FXz0hpXsR24PJc5ZIi/ogNwkaPqG4BmndzecpSh0vc2FJPZUD9LT0I09REY/vXR0oQAalLkW0asGD5taHZTUZq/kBpsNxaAFrLM23i4mUcf33M5fjLpvx5LRICrX/57XpBrDh2TooBU6Qj3CgoY0uPRYUmSNxbVx1czNzl2JtEpb5yjoxfVPQeg0BvQM00G8LJINISR+ohrjhkZmAqchDupAX+yFrxTtORa78CtnIL6z/aTNlgwwVD8kvL/1pFA/JWYmKDmz93mV/+6wubGzNSQCstzjkFA4/iZEKewKUoRIAi/fxyscP6L/rCpmY/4llZZvrnyTqVbt6URWpopUpH4rwYqreXAtJxJsfBJIeSmUIiDIOMGkCTvyTEW3fWGmGoqWtSHLoaWDyAIGb7azb+KvfpWtEcoPFWfSWU+LGee0A/YsUhBl7ADB9A0CJEuR8q4BPpKpfLwPKSiKSAXL7zDkyjExyhtgqbSl2jS+rKIHOZNL8JkCcTP2MKMVd563C5rC5FMKqu3S9m2b6380E=
+script:
+- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./...
diff --git a/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md
new file mode 100644
index 00000000..9322b065
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md
@@ -0,0 +1,74 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, gender identity and expression, level of experience,
+nationality, personal appearance, race, religion, or sexual identity and
+orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or
+advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic
+  address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+  professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct, or to ban temporarily or
+permanently any contributor for other behaviors that they deem inappropriate,
+threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an appointed
+representative at an online or offline event. Representation of a project may be
+further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the project team at ivan+abuse@flanders.co.nz. All
+complaints will be reviewed and investigated and will result in a response that
+is deemed necessary and appropriate to the circumstances. The project team is
+obligated to maintain confidentiality with regard to the reporter of an incident.
+Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
+available at [http://contributor-covenant.org/version/1/4][version]
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/
diff --git a/vendor/github.com/go-openapi/spec/LICENSE b/vendor/github.com/go-openapi/spec/LICENSE
new file mode 100644
index 00000000..d6456956
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/go-openapi/spec/README.md b/vendor/github.com/go-openapi/spec/README.md
new file mode 100644
index 00000000..6354742c
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/README.md
@@ -0,0 +1,10 @@
+# OAI object model [![Build Status](https://travis-ci.org/go-openapi/spec.svg?branch=master)](https://travis-ci.org/go-openapi/spec) [![codecov](https://codecov.io/gh/go-openapi/spec/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/spec) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
+
+[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/spec/master/LICENSE)
+[![GoDoc](https://godoc.org/github.com/go-openapi/spec?status.svg)](http://godoc.org/github.com/go-openapi/spec)
+[![GolangCI](https://golangci.com/badges/github.com/go-openapi/spec.svg)](https://golangci.com)
+[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/spec)](https://goreportcard.com/report/github.com/go-openapi/spec)
+
+The object model for OpenAPI specification documents.
+
+Currently supports Swagger 2.0.
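
The README above only states what the package is for. As a hedged sketch, assuming the `spec.Swagger` type this vendored package exports (declared in files later in the diff) and its standard `encoding/json` support, loading a minimal Swagger 2.0 document could look like:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// A minimal Swagger 2.0 document; the JSON field names come from the
	// OpenAPI 2.0 specification, while the Go accessors below are assumptions
	// about the exported model (see the vendored sources for the definitions).
	raw := []byte(`{
		"swagger": "2.0",
		"info": {"title": "Minimal", "version": "1.0.0"},
		"paths": {}
	}`)

	var doc spec.Swagger
	if err := json.Unmarshal(raw, &doc); err != nil {
		panic(err)
	}
	fmt.Println(doc.Swagger, doc.Info.Title) // 2.0 Minimal
}
```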
diff --git a/vendor/github.com/go-openapi/spec/bindata.go b/vendor/github.com/go-openapi/spec/bindata.go
new file mode 100644
index 00000000..c67e2d87
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/bindata.go
@@ -0,0 +1,297 @@
+// Code generated by go-bindata. DO NOT EDIT.
+// sources:
+// schemas/jsonschema-draft-04.json (4.357kB)
+// schemas/v2/schema.json (40.249kB)
+
+package spec
+
+import (
+	"bytes"
+	"compress/gzip"
+	"crypto/sha256"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+)
+
+func bindataRead(data []byte, name string) ([]byte, error) {
+	gz, err := gzip.NewReader(bytes.NewBuffer(data))
+	if err != nil {
+		return nil, fmt.Errorf("read %q: %v", name, err)
+	}
+
+	var buf bytes.Buffer
+	_, err = io.Copy(&buf, gz)
+	clErr := gz.Close()
+
+	if err != nil {
+		return nil, fmt.Errorf("read %q: %v", name, err)
+	}
+	if clErr != nil {
+		return nil, clErr
+	}
+
+	return buf.Bytes(), nil
+}
+
+type asset struct {
+	bytes  []byte
+	info   os.FileInfo
+	digest [sha256.Size]byte
+}
+
+type bindataFileInfo struct {
+	name    string
+	size    int64
+	mode    os.FileMode
+	modTime time.Time
+}
+
+func (fi bindataFileInfo) Name() string {
+	return fi.name
+}
+func (fi bindataFileInfo) Size() int64 {
+	return fi.size
+}
+func (fi bindataFileInfo) Mode() os.FileMode {
+	return fi.mode
+}
+func (fi bindataFileInfo) ModTime() time.Time {
+	return fi.modTime
+}
+func (fi bindataFileInfo) IsDir() bool {
+	return false
+}
+func (fi bindataFileInfo) Sys() interface{} {
+	return nil
+}
+
+var _jsonschemaDraft04JSON = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xc4\x57\x3d\x6f\xdb\x3c\x10\xde\xf3\x2b\x08\x26\x63\xf2\x2a\x2f\xd0\xc9\x5b\xd1\x2e\x01\x5a\x34\x43\x37\x23\x03\x6d\x9d\x6c\x06\x14\xa9\x50\x54\x60\xc3\xd0\x7f\x2f\x28\x4a\x14\x29\x91\x92\x2d\xa7\x8d\x97\x28\xbc\xaf\xe7\x8e\xf7\xc5\xd3\x0d\x42\x08\x61\x9a\xe2\x15\xc2\x7b\xa5\x8a\x55\x92\xbc\x96\x82\x3f\x94\xdb\x3d\xe4\xe4\x3f\x21\x77\x49\x2a\x49\xa6\x1e\x1e\xbf\x24\xe6\xec\x16\xdf\x1b\xa1\x3b\xf3\xff\x02\xc9\x14\xca\xad\xa4\x85\xa2\x82\x6b\xe9\x6f\x42\x02\x32\x2c\x28\x07\x45\x5a\x15\x3d\x77\x46\x39\xd5\xcc\x25\x5e\x21\x83\xb8\x21\x18\xb6\xaf\x52\x92\xa3\x47\x68\x88\xea\x58\x80\x56\x4e\x1a\xf2\xbd\x4f\xcc\x29\x7f\x52\x90\x6b\x7d\xff\x0f\x48\xb4\x3d\x3f\x21\x7c\x27\x21\xd3\x2a\x6e\x31\xaa\x2d\x53\xdd\xf3\xe3\x42\x94\x54\xd1\x77\x78\xe2\x0a\x76\x20\xe3\x20\x68\xcb\x30\x86\x41\xf3\x2a\xc7\x2b\xf4\x78\x8e\xfe\xef\x90\x91\x8a\xa9\xc7\xb1\x1d\xc2\xd8\x2f\x0d\x75\xed\xc1\x4e\x9c\xc8\x25\x43\xac\xa8\xbe\xd7\xcc\xa9\xd1\xa9\x21\xa0\x1a\xbd\x04\x61\x94\x34\x2f\x18\xfc\x3e\x16\x50\x8e\x4d\x03\x6f\x1c\x58\xdb\x48\x23\xbc\x11\x82\x01\xe1\xfa\xd3\x3a\x8e\x30\xaf\x18\x33\x7f\xf3\x8d\x39\x11\x9b\x57\xd8\x2a\xfd\x55\x2a\x49\xf9\x0e\xc7\xec\x37\xd4\x25\xf7\xec\x5c\x66\xc7\xd7\x99\xaa\xcf\x4f\x89\x8a\xd3\xb7\x0a\x3a\xaa\x92\x15\xf4\x30\x6f\x1c\xb0\xd6\x46\xe7\x98\x39\x2d\xa4\x28\x40\x2a\x3a\x88\x9e\x29\xba\x88\x37\x2d\xca\x60\x38\xfa\xba\x5b\x20\xac\xa8\x62\xb0\x4c\xd4\xaf\xda\x45\x0a\xba\x5c\x3b\xb9\xc7\x79\xc5\x14\x2d\x18\x34\x19\x1c\x51\xdb\x25\x4d\xb4\x7e\x06\x14\x38\x6c\x59\x55\xd2\x77\xf8\x69\x59\xfc\x7b\x73\xed\x93\x43\xcb\x32\x6d\x3c\x28\xdc\x1b\x9a\xd3\x62\xab\xc2\x27\xf7\x41\xc9\x08\x2b\x23\x08\xad\x13\x57\x21\x9c\xd3\x72\x0d\x42\x72\xf8\x01\x7c\xa7\xf6\x83\xce\x39\xd7\x82\x3c\x1f\x2f\xd6\x60\x1b\xa2\xdf\x35\x89\x52\x20\xe7\x73\x74\xe0\x66\x26\x64\x4e\xb4\x97\x58\xc2\x0e\x0e\xe1\x60\x92\x34\x6d\xa0\x10\xd6\xb5\x83\x61\x27\xe6\x47\xd3\x89\xbd\x63\xfd\x3b\x8d\x03\x3d\x6c\x42\x2d\x5b\x70\xee\xe8\xdf\x4b\xf4\x66\x4e\xe1\x01\x45\x17\x80\x74\xad\x4f\xc3\xf3\xae\xc6\x1d\xc6\xd7\xc2\xce\xc9\xe1\x29\x30\x86\x2f\x4a\xa6\x4b\x15\x84\x73\xc9\x6f\xfd\x7f\xa5\x6e\x9e\xbd\xf1\xb0\xd4\xdd\x45\x5a\xc2\x3e\x4b\x78\xab\xa8\x84\x74\x4a\x91\x3b\x92\x23\x05\xf2\x1c\x1e\x7b\xf3\x09\xf8\xcf\xab\x24\xb6\x60\xa2\xe8\x4c\x9f\x75\x77\xaa\x8c\xe6\x01\x45\x36\x86\xcf\xc3\x63\x3a\xea\xd4\x8d\x7e\x06\xac\x14\x0a\xe0\x29\xf0\xed\x07\x22\x1a\x65\xda\x44\xae\xa2\x73\x1a\xe6\x90\x69\xa2\x8c\x46\xb2\x2f\xde\x49\x38\x08\xed\xfe\xfd\x41\xaf\x9f\xa9\x55\xd7\xdd\x22\x8d\xfa\x45\x63\xc5\x0f\x80\xf3\xb4\x08\xd6\x79\x30\x9e\x93\xee\x59\xa6\xd0\x4b\xee\x22\xe3\x33\xc1\x3a\x27\x68\x36\x78\x7e\x87\x0a\x06\xd5\x2e\x20\xd3\xaf\x15\xfb\xd8\x3b\x73\x14\xbb\x92\xed\x05\x5d\x2e\x29\x38\x2c\x94\xe4\x42\x45\x5e\xd3\xb5\x7d\xdf\x47\xca\x38\xb4\x5c\xaf\xfb\x7d\xdd\x6d\xf4\xa1\x2d\x77\xdd\x2f\xce\x6d\xc4\x7b\x8b\x4e\x67\xa9\x6f\xfe\x04\x00\x00\xff\xff\xb1\xd1\x27\x78\x05\x11\x00\x00")
+
+func jsonschemaDraft04JSONBytes() ([]byte, error) {
+	return bindataRead(
+		_jsonschemaDraft04JSON,
+		"jsonschema-draft-04.json",
+	)
+}
+
+func jsonschemaDraft04JSON() (*asset, error) {
+	bytes, err := jsonschemaDraft04JSONBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "jsonschema-draft-04.json", size: 4357, mode: os.FileMode(0644), modTime: time.Unix(1567900649, 0)}
+	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe1, 0x48, 0x9d, 0xb, 0x47, 0x55, 0xf0, 0x27, 0x93, 0x30, 0x25, 0x91, 0xd3, 0xfc, 0xb8, 0xf0, 0x7b, 0x68, 0x93, 0xa8, 0x2a, 0x94, 0xf2, 0x48, 0x95, 0xf8, 0xe4, 0xed, 0xf1, 0x1b, 0x82, 0xe2}}
+	return a, nil
+}
+
+var _v2SchemaJSON = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x5d\x4f\x93\xdb\x36\xb2\xbf\xfb\x53\xa0\x14\x57\xd9\xae\xd8\x92\xe3\xf7\x2e\xcf\x97\xd4\xbc\xd8\x49\x66\x37\x5e\x4f\x79\x26\xbb\x87\x78\x5c\x05\x91\x2d\x09\x09\x09\x30\x00\x38\x33\x5a\xef\x7c\xf7\x2d\xf0\x9f\x08\x02\x20\x41\x8a\xd2\xc8\x0e\x0f\xa9\x78\x28\xa0\xd1\xdd\x68\x34\x7e\xdd\xf8\xf7\xf9\x11\x42\x33\x49\x64\x04\xb3\xd7\x68\x76\x86\xfe\x76\xf9\xfe\x1f\xe8\x32\xd8\x40\x8c\xd1\x8a\x71\x74\x79\x8b\xd7\x6b\xe0\xe8\xd5\xfc\x25\x3a\xbb\x38\x9f\xcf\x9e\xab\x0a\x24\x54\xa5\x37\x52\x26\xaf\x17\x0b\x91\x17\x99\x13\xb6\xb8\x79\xb5\x10\x59\xdd\xf9\xef\x82\xd1\x6f\xf2\xc2\x8f\xf3\x4f\xb5\x1a\xea\xc7\x17\x45\x41\xc6\xd7\x8b\x90\xe3\x95\x7c\xf1\xf2\x7f\x8b\xca\x45\x3d\xb9\x4d\x32\xa6\xd8\xf2\x77\x08\x64\xfe\x8d\xc3\x9f\x29\xe1\xa0\x9a\xff\xed\x11\x42\x08\xcd\x8a\xd6\xb3\x9f\x15\x67\x74\xc5\xca\x7f\x27\x58\x6e\xc4\xec\x11\x42\xd7\x59\x5d\x1c\x86\x44\x12\x46\x71\x74\xc1\x59\x02\x5c\x12\x10\xb3\xd7\x68\x85\x23\x01\x59\x81\x04\x4b\x09\x9c\x6a\xbf\x7e\xce\x49\x7d\xba\x7b\x51\xfd\xa1\x44\xe2\xb0\x52\xac\x7d\xb3\x08\x61\x45\x68\x46\x56\x2c\x6e\x80\x86\x8c\xbf\xbd\x93\x40\x05\x61\x74\x96\x95\xbe\x7f\x84\xd0\x7d\x4e\xde\x42\xb7\xe4\xbe\x46\xbb\x14\x5b\x48\x4e\xe8\xba\x90\x05\xa1\x19\xd0\x34\xae\xc4\xce\xbe\xbc\x9a\xbf\x9c\x15\x7f\x5d\x57\xc5\x42\x10\x01\x27\x89\xe2\x48\x51\xb9\xda\x40\xd5\x87\x37\xc0\x15\x5f\x88\xad\x90\xdc\x10\x81\x42\x16\xa4\x31\x50\x39\x2f\x38\xad\xab\xb0\x53\xd8\xac\x94\x56\x6f\xc3\x84\xf4\x11\xa4\x50\xb3\xfa\xe9\xd3\x6f\x9f\x3e\xdf\x2f\xd0\xeb\x8f\x1f\x3f\x7e\xbc\xfe\xf6\xe9\xf7\xaf\x5f\x7f\xfc\x18\x7e\xfb\xec\xfb\xc7\xb3\x36\x79\x54\x43\xe8\x29\xc5\x31\x20\xc6\x11\x49\x9e\xe5\x12\x41\x66\xa0\xe8\xed\x1d\x8e\x93\x08\x5e\xa3\x27\x3b\xc3\x7c\xa2\x73\xba\xc4\x02\x2e\xb0\xdc\xf4\xe5\x76\xd1\xca\x96\xa2\x8a\x94\xcd\x21\xc9\x6c\xec\x2c\x70\x42\x9e\x34\x74\x9d\x19\x7c\xcd\x20\x9c\xea\x2e\x0a\xfe\x42\x84\xd4\x29\x04\x8c\x8a\xb4\x41\xa2\xc1\xdc\x19\x8a\x88\x90\x4a\x49\xef\xce\xdf\xbd\x45\x4a\x52\x81\x70\x10\x40\x22\x21\x44\xcb\x6d\xc5\xec\x4e\x3c\x1c\x45\xef\x57\x9a\xb5\x7d\xae\xfe\xe5\xe4\x31\x86\x90\xe0\xab\x6d\x02\x3b\x2e\xcb\x11\x90\xd9\xa8\xc6\x77\xc2\x59\x98\x06\xfd\xf9\x2e\x78\x45\x01\xa6\xa8\xa0\x71\x5c\xbe\x33\xa7\xd2\xd9\x5f\x95\xef\xd9\xd5\xac\xfd\xdc\x5d\xbf\x5e\xb8\xd1\x3e\xc7\x31\x48\xe0\x5e\x4c\x14\x65\xdf\xb8\xa8\x71\x10\x09\xa3\xc2\xc7\x02\xcb\xa2\x4e\x5a\x02\x82\x94\x13\xb9\xf5\x30\xe6\xb2\xa4\xb5\xfe\x9b\x3e\x7a\xb2\x55\xd2\xa8\x4a\xbc\x16\xb6\x71\x8e\x39\xc7\xdb\x9d\xe1\x10\x09\x71\xbd\x9c\xb3\x41\x89\xd7\xa5\x89\xdc\x57\xb5\x53\x4a\xfe\x4c\xe1\xbc\xa0\x21\x79\x0a\x1a\x0f\x70\xa7\x5c\x08\x8e\xde\xb0\xc0\x43\x24\xad\x74\x63\x0e\xb1\xd9\x90\xe1\xb0\x2d\x13\xa7\x6d\x78\xfd\x04\x14\x38\x8e\x90\xaa\xce\x63\xac\x3e\x23\xbc\x64\xa9\xb4\xf8\x03\x63\xde\xcd\xbe\x16\x13\x4a\x55\xac\x82\x12\xc6\xac\xd4\x35\xf7\x22\xd4\x3a\xff\x22\x73\x0e\x6e\x51\xa0\x75\x1e\xae\x8f\xe8\x5d\xc7\x59\xe6\xe4\x9a\x18\x8d\xd6\x1c\x53\x84\x4d\xb7\x67\x28\x37\x09\x84\x69\x88\x12\x0e\x01\x11\x80\x32\xa2\xf5\xb9\xaa\xc6\xd9\x73\x53\xab\xfb\xb4\x2e\x20\xc6\x54\x92\xa0\x9a\xf3\x69\x1a\x2f\x81\x77\x37\xae\x53\x1a\xce\x40\xc4\xa8\x82\x1c\xb5\xef\xda\x24\x7d\xb9\x61\x69\x14\xa2\x25\xa0\x90\xac\x56\xc0\x81\x4a\xb4\xe2\x2c\xce\x4a\x64\x7a\x9a\x23\xf4\x13\x91\x3f\xa7\x4b\xf4\x63\x84\x6f\x18\x87\x10\xbd\xc3\xfc\x8f\x90\xdd\x52\x44\x04\xc2\x51\xc4\x6e\x21\x74\x48\x21\x81\xc7\xe2\xfd\xea\x12\xf8\x0d\x09\xf6\xe9\x47\x35\xaf\x67\xc4\x14\xf7\x22\x27\x97\xe1\xe2\x76\x2d\x06\x8c\x4a\x1c\x48\x3f\x73\x2d\x0b\x5b\x29\x45\x24\x00\x2a\x0c\x11\xec\x94\xca\xc2\x
a6\xc1\x37\x21\x43\x83\x3b\x5f\x97\xf1\x43\x5e\x53\x73\x19\xa5\x36\xd8\x2d\x05\x2e\x34\x0b\xeb\x39\xfc\x1d\x63\x51\x01\xbd\x3d\xbb\x90\x84\x40\x25\x59\x6d\x09\x5d\xa3\x1c\x37\xe6\x5c\x16\x9a\x40\x09\x70\xc1\xe8\x82\xf1\x35\xa6\xe4\xdf\x99\x5c\x8e\x9e\x4d\x79\xb4\x27\x2f\xbf\x7e\xf8\x05\x25\x8c\x50\xa9\x98\x29\x90\x62\x60\xea\x75\xae\x13\xca\xbf\x2b\x1a\x29\x27\x76\xd6\x20\xc6\x64\x5f\xe6\x32\x1a\x08\x87\x21\x07\x21\xbc\xb4\xe4\xe0\x32\x67\xa6\xcd\xf3\x1e\xcd\xd9\x6b\xb6\x6f\x8e\x27\xa7\xed\xdb\xe7\xbc\xcc\x1a\x07\xce\x6f\x87\x33\xf0\xba\x51\x17\x22\x66\x78\x79\x8e\xce\xe5\x13\x81\x80\x06\x2c\xe5\x78\x0d\xa1\xb2\xb8\x54\xa8\x79\x09\xbd\xbf\x3c\x47\x01\x8b\x13\x2c\xc9\x32\xaa\xaa\x1d\xd5\xee\xab\x36\xbd\x6c\xfd\x54\x6c\xc8\x08\x01\x3c\xbd\xe7\x07\x88\xb0\x24\x37\x79\x90\x28\x4a\x1d\x10\x1a\x92\x1b\x12\xa6\x38\x42\x40\xc3\x4c\x43\x62\x8e\xae\x36\xb0\x45\x71\x2a\xa4\x9a\x23\x79\x59\xb1\xa8\xf2\xa4\x0c\x60\x9f\xcc\x8d\x40\xf5\x80\xca\xa8\x99\xc3\xa7\x85\x1f\x31\x25\xa9\x82\xc5\x6d\xbd\xd8\x36\x76\x7c\x02\x28\x97\xf6\x1d\x74\x3b\x11\x7e\x91\xae\x32\xf8\x6c\xf4\xe6\x7b\x9a\xa5\x1f\x62\xc6\x21\xcf\x9a\xe5\xed\x8b\x02\xf3\x2c\x33\x33\xdf\x00\xca\xc9\x09\xb4\x04\xf5\xa5\x08\xd7\xc3\x02\x18\x66\xf1\xab\x1e\x83\x37\x4c\xcd\x12\xc1\x1d\x50\xf6\xaa\xbd\xfe\xe2\x73\x48\x38\x08\xa0\x32\x9b\x18\x44\x86\x0b\x6a\xc1\xaa\x26\x96\x2d\x96\x3c\xa0\x54\x65\x73\x87\x15\xca\x15\xe5\xf5\x94\x46\x9f\x33\x1a\x0c\x9a\xb1\x5a\xd9\x6a\x95\xcd\xcb\x7e\xec\x9a\xc5\x94\x3b\x37\x26\x31\xd7\xfc\xe4\x1f\x13\x8c\x31\x75\x9c\xba\xf7\x87\x3c\xa1\xb7\x4f\x17\x1b\x09\x82\x98\xc4\x70\x95\xd3\xe8\x4c\x48\x5a\xa6\xd6\x2a\x3d\x56\x42\x80\x9f\xaf\xae\x2e\x50\x0c\x42\xe0\x35\x34\x3c\x8a\x62\x03\x37\xba\xb2\x27\x04\xda\x25\x8d\x06\xe2\xa0\x13\x8a\xf3\xf5\xec\x10\x72\x67\x88\x90\x3d\x4b\x64\xeb\xaa\xda\x8f\xf7\x5a\x75\x47\x9a\xa8\x51\x70\x26\xd2\x38\xc6\x7c\xbb\x57\xfc\xbd\xe4\x04\x56\xa8\xa0\x54\x9a\x45\xd5\xf7\x0f\x16\xfc\x57\x1c\x3c\xdf\x23\xba\x77\x38\xda\x16\x4b\x31\x53\x6a\x4d\x9a\x15\x63\xe7\xe1\x18\x69\x9f\x22\xe0\x24\xbb\x94\x4b\x97\xee\x2d\xf9\x70\x87\x72\x7b\xe6\xc4\x33\x2a\x66\x5e\x1c\x35\x72\xe3\x2d\xda\x73\xe4\xc7\x51\x6d\xa4\xa1\x2a\x4f\xde\x94\xcb\xb2\x3e\x31\x48\xae\x82\xce\xc9\xc8\x65\xcd\xc3\xb7\x34\xb6\x2b\xdf\x58\x65\x78\x6e\x73\xac\x5e\x24\x0d\x3f\xdc\x70\x23\xc6\xda\x52\x0b\x2d\x63\x7d\xa9\x49\x2d\x54\x48\x28\xc0\x12\x9c\xe3\x63\xc9\x58\x04\x98\x36\x07\xc8\x0a\xa7\x91\xd4\xf0\xbc\xc1\xa8\xb9\x70\xd0\xc6\xa9\xb6\x78\x80\x5a\xa3\xb4\x2c\xf4\x18\x0b\x8a\x9d\xd0\xb4\x55\x10\xee\x0d\xc5\xd6\xe0\x99\x93\xdc\xa1\x04\xbb\xf1\xa7\x23\xd1\xd1\x97\x8c\x87\x13\x0a\x21\x02\xe9\x99\x25\xed\x20\xc5\x92\x66\x3c\x32\x9c\xd6\x06\xb0\x31\x5c\x86\x29\x0a\xcb\x60\x33\x12\xa5\x91\xfc\x96\x75\xd0\x59\xd7\x13\xbd\xd3\x23\x79\xdd\x2a\x90\xa6\x38\x06\x91\x39\x7f\x20\x72\x03\x1c\x2d\x01\x61\xba\x45\x37\x38\x22\x61\x8e\x71\x85\xc4\x32\x15\x28\x60\x61\x16\xb8\x3d\x29\xdc\x4d\x3d\x2f\x12\x13\x7d\xc8\x7e\x37\xee\xa8\x7f\xfa\xdb\xcb\x17\xff\x77\xfd\xf9\x7f\xee\x9f\x3d\xfe\xcf\xa7\xa7\x45\xfb\xcf\x1e\xf7\xf3\xe0\xff\xc4\x51\x0a\x8e\x4c\xcb\x01\xdc\x0a\x65\xb2\x01\x83\xed\x3d\xe4\xa9\xa3\x4e\x2d\x59\xc5\xe8\x2f\x48\x7d\x5a\x6e\x37\xbf\x5c\x9f\x35\x13\x64\x14\xfa\xef\x0b\x68\xa6\x0d\xb4\x8e\xf1\xa8\xff\xbb\x60\xf4\x03\x64\xab\x5b\x81\x65\x51\xe6\xda\xca\xfa\xf0\xb0\xac\x3e\x9c\xca\x26\x0e\x1d\xdb\x57\x5b\xbb\xb4\x9a\xa6\xb6\x9b\x1a\x6b\xd1\x9a\x9e\x7e\x33\x9a\xec\x41\x69\x45\x22\xb8\xb4\x51\xeb\x04\x77\xca\x6f\x7b\x7b\xc8\xb2\xb0\x95\x92\x25\x5b\xd0\x42\xaa\x2a\xdd\x32\x78\x4f\x0c\xab\x68\x46\x6c\xea\x6d\xf4\x5c\x5e\xde\xc4\xac\xa5\xf9\xd1\x00\x9f\x7d\
x98\x65\x24\xbd\xc7\x97\xd4\xb3\x3a\xa8\x2b\xa0\x34\x76\xf9\x65\x5f\x2d\x25\x95\x1b\xcf\xd6\xf4\x9b\x5f\x09\x95\xb0\x36\x3f\xdb\xd0\x39\x2a\x93\x1c\x9d\x03\xa2\x4a\xca\xf5\xf6\x10\xb6\x94\x89\x0b\x6a\x70\x12\x13\x49\x6e\x40\xe4\x29\x12\x2b\xbd\x80\x45\x11\x04\xaa\xc2\x8f\x56\x9e\x5c\x6b\xec\x8d\x5a\x0e\x14\x59\x06\x2b\x1e\x24\xcb\xc2\x56\x4a\x31\xbe\x23\x71\x1a\xfb\x51\x2a\x0b\x3b\x1c\x48\x10\xa5\x82\xdc\xc0\xbb\x3e\x24\x8d\x5a\x76\x2e\x09\xed\xc1\x65\x51\xb8\x83\xcb\x3e\x24\x8d\x5a\x2e\x5d\xfe\x02\x74\x2d\x3d\xf1\xef\xae\xb8\x4b\xe6\x5e\xd4\xaa\xe2\x2e\x5c\x5e\xec\x0e\xf5\x5b\x0c\xcb\x0a\xbb\xa4\x3c\xf7\x1f\x2a\x55\x69\x97\x8c\x7d\x68\x95\xa5\xad\xb4\xf4\x9c\xa5\x07\xb9\x7a\x05\xbb\xad\x50\x6f\xfb\xa0\x4e\x9b\x48\x23\x49\x92\x28\x87\x19\x3e\x32\xee\xca\x3b\x46\x7e\x7f\x18\x64\xcc\xcc\x0f\x34\xe9\x36\x8b\xb7\x6c\xa8\xa5\x5b\x54\x4c\x54\x5b\x15\x3a\xf1\x6c\x2d\xfe\x96\xc8\x0d\xba\x7b\x81\x88\xc8\x23\xab\xee\x7d\x3b\x92\xa7\x60\x29\xe3\xdc\xff\xb8\x64\xe1\xf6\xa2\x5a\x59\xdc\x6f\xeb\x45\x7d\x6a\xd1\x76\x1e\xea\xb8\xf1\xfa\x14\xd3\x36\x63\xe5\xd7\xf3\xe4\xbe\x25\xbd\x5e\x05\xeb\x73\x74\xb5\x21\x2a\x2e\x4e\xa3\x30\xdf\xbf\x43\x28\x2a\xd1\xa5\x2a\x9d\x8a\xfd\x76\xd8\x8d\xbc\x67\x65\xc7\xb8\x03\x45\xec\xa3\xb0\x37\x8a\x70\x4c\x68\x91\x51\x8e\x58\x80\xed\x4a\xf3\x81\x62\xca\x96\xbb\xf1\x52\xcd\x80\xfb\xe4\x4a\x5d\x6c\xdf\x6e\x20\x4b\x80\x30\x8e\x28\x93\xf9\xe9\x8d\x8a\x6d\xd5\x59\x65\x7b\xaa\x44\x9e\xc0\xc2\xd1\x7c\x40\x26\xd6\x1a\xce\xf9\xc5\x69\x7b\x6c\xec\xc8\x71\x7b\xe5\x21\x2e\xd3\xe5\x65\x93\x91\x53\x0b\x7b\x3a\xc7\xfa\x17\x6a\x01\xa7\x33\xd0\xf4\x40\x0f\x39\x87\xda\xe4\x54\x87\x3a\xd5\xe3\xc7\xa6\x8e\x20\xd4\x11\xb2\x4e\xb1\xe9\x14\x9b\x4e\xb1\xe9\x14\x9b\xfe\x15\x63\xd3\x47\xf5\xff\x97\x38\xe9\xcf\x14\xf8\x76\x82\x49\x13\x4c\xaa\x7d\xcd\x6c\x62\x42\x49\x87\x43\x49\x19\x33\x6f\xe3\x44\x6e\x9b\xab\x8a\x3e\x86\xaa\x99\x52\x1b\x5b\x59\x33\x02\x09\xa0\x21\xa1\x6b\x84\x6b\x66\xbb\xdc\x16\x0c\xd3\x68\xab\xec\x36\x4b\xd8\x60\x8a\x40\x31\x85\x6e\x14\x57\x13\xc2\xfb\x92\x10\xde\xbf\x88\xdc\xbc\x53\x5e\x7f\x82\x7a\x13\xd4\x9b\xa0\xde\x04\xf5\x90\x01\xf5\x94\xcb\x7b\x83\x25\x9e\xd0\xde\x84\xf6\x6a\x5f\x4b\xb3\x98\x00\xdf\x04\xf8\x6c\xbc\x7f\x19\x80\xaf\xf1\x71\x45\x22\x98\x40\xe0\x04\x02\x27\x10\xd8\x29\xf5\x04\x02\xff\x4a\x20\x30\xc1\x72\xf3\x65\x02\x40\xd7\xc1\xd1\xe2\x6b\xf1\xa9\x7b\xfb\xe4\x20\xc0\x68\x9d\xd4\xb4\xd3\x96\xb5\xa6\xd1\x41\x20\xe6\x89\xc3\x48\x65\x58\x13\x84\x9c\x56\x56\x3b\x0c\xe0\x6b\x83\x5c\x13\xd2\x9a\x90\xd6\x84\xb4\x26\xa4\x85\x0c\xa4\x45\x19\xfd\xff\x63\x6c\x52\xb5\x1f\x1e\x19\x74\x3a\xcd\xb9\x69\xce\xa6\x3a\x0f\x7a\x2d\x19\xc7\x81\x14\x5d\xcb\xd5\x03\xc9\x39\xd0\xb0\xd1\xb3\xcd\xfb\x7a\x2d\x5d\x3a\x48\xe1\xfa\x2e\xe6\x81\x42\x18\x86\xd6\xc1\xbe\xb1\x23\xd3\xf7\x34\xed\x19\x0a\x0b\xc4\x48\x44\xfd\x22\x50\xb6\x42\x58\xbb\xe5\x3d\xa7\x73\xd4\x8b\xc4\x8c\x70\x61\xec\x73\xee\xc3\x81\x8b\xf5\xe2\xd7\x52\x3e\xcf\xeb\xeb\x17\x3b\x71\x16\xda\x7d\xb8\xde\xf0\x7a\x8f\x06\x2d\xa7\x40\x7b\xc1\x9d\x41\x4d\xb6\x61\xa2\x4e\x9f\x3d\xa0\xc5\xae\xe3\x1c\x1d\x40\x6c\x48\x8b\x63\xa0\xb5\x01\xed\x8e\x02\xe9\x86\xc8\x3b\x06\xee\xdb\x4b\xde\xbd\xc0\xa1\x6f\xcb\xda\xfc\xc2\x44\x16\x87\x9c\x17\x31\xd3\x30\x20\x39\x42\xcb\x6f\xf2\xf1\xf4\x72\x10\xf8\x1c\xa0\xf3\xbd\x10\xea\x21\x35\x7d\xe8\x86\xdb\x15\xed\x81\x81\x07\x28\xbb\x13\x28\xc7\xf8\xce\x7d\x8d\xc2\x31\xb4\x7e\x94\xd6\xdb\x55\xef\x4a\xfb\xed\xc3\x40\x3e\xeb\x9f\xe9\x99\x0f\xdf\x08\x65\x88\x27\x73\x86\x31\x9d\x47\xdf\x55\x19\xba\x3d\xee\x15\x0a\xcd\x8c\xaa\x5e\xb9\xf6\x57\x33\x73\x5a\xa1\x89\x7b\x3b\xa0\xb2\xa4\xc2\xf6\xc1\x53\xb5\x00\xca\x23\xe5
\xf4\x60\x6a\xb4\x2d\x74\xea\x4e\xed\x3b\xe3\x47\xfb\xed\x82\x3d\x19\xd4\x3b\x6b\xaf\xae\x2b\x2f\x57\xb3\x82\x68\xcb\xed\x88\x2e\xe1\x5c\xd7\x26\xfa\x0a\x65\xe7\xce\x11\x33\xb4\xdd\x66\xe3\x37\xf6\xfa\x70\xd6\x4f\xa1\x21\x51\xd8\x3c\x26\x14\x4b\xc6\x87\x44\x27\x1c\x70\xf8\x9e\x46\xce\xab\x21\x07\x5f\xc1\x76\x17\x1b\x77\xb4\xda\x75\xa0\x0a\x3a\x30\xe1\xf8\x97\x32\x16\x2b\x00\x75\x85\xee\x62\x46\xef\xd3\x85\xb5\x6b\x60\xbe\xf2\x30\x7a\x8c\x0b\x4b\xa6\xd0\xf9\x64\x42\xe7\x07\x41\x41\xe3\x2c\x5d\xf9\x6d\xe9\x39\x98\x3b\x3b\x5d\x67\xd4\x5c\xed\xf2\xf0\x48\x7b\xbd\x2d\x31\xdd\x3f\x34\xad\x44\x76\x51\x9a\x56\x22\xa7\x95\xc8\x69\x25\xf2\xe1\x56\x22\x1f\x00\x32\x6a\x73\x92\xed\xe1\xc6\x7d\x9f\x49\x2c\x69\x7e\xc8\x31\x4c\x0c\xb4\xf2\x54\x3b\x79\x3b\x9e\x4d\xb4\xd1\x18\x3e\x5f\x9a\x93\xa2\x11\xc3\xda\x27\x0b\xaf\x37\x2e\x5c\x37\xfb\xeb\x9a\xd6\xc3\xac\xc3\xcc\xf8\x1e\x5b\x9d\xac\x22\x64\xb7\xed\x26\xb8\xf3\xb9\x3c\xbb\x1f\xe2\xb0\x22\x77\x43\x6a\x62\x29\x39\x59\xa6\xe6\xe5\xcd\x7b\x83\xc0\x5b\x8e\x93\x64\xac\xeb\xca\x4f\x65\xac\x4a\xbc\x1e\xcd\x82\xfa\x3c\x70\x36\xb6\xb5\xed\x79\xef\xec\x68\x00\xff\x54\xfa\xb5\xe3\xf1\xdb\xe1\xbe\xce\x76\x17\xaf\x57\xb6\x6b\x89\x05\x09\xce\x52\xb9\x01\x2a\x49\xbe\xd9\xf4\xd2\xb8\x7a\xbf\x91\x02\xf3\x22\x8c\x13\xf2\x77\xd8\x8e\x43\x8b\xe1\x54\x6e\x5e\x9d\xc7\x49\x44\x02\x22\xc7\xa4\x79\x81\x85\xb8\x65\x3c\x1c\x93\xe6\x59\xa2\xf8\x1c\x51\x95\x05\xd9\x20\x00\x21\x7e\x60\x21\x58\xa9\x56\xff\xbe\xb6\x5a\x5e\x5b\x3f\x1f\xd6\xd3\x3c\xc4\x4d\xba\x99\xb4\x63\x6e\x7d\x3e\x3d\x57\xd2\x18\x5f\x47\xe8\xc3\x06\x8a\x68\x6c\x7f\x3b\x72\x0f\xe7\xe2\x77\x77\xf1\xd0\x99\xab\xdf\x2e\xfe\xd6\xbb\xcd\x1a\xb9\x90\xd1\xaf\xf2\x38\x3d\xdb\x74\xf8\xeb\xe3\xda\xe8\x2a\x62\xb7\xda\x1b\x07\xa9\xdc\x30\x5e\xbc\x68\xfb\x6b\x9f\x97\xf1\xc6\xb1\xd8\x5c\x29\x1e\x49\x30\xc5\xf7\xde\xad\x91\x42\xf9\xdd\xed\x89\x80\x25\xbe\x37\xd7\xe7\x32\x5c\xe6\x35\xac\xd4\x0c\x2d\xf7\x90\xc4\xe3\xf5\xe3\x2f\x7f\x54\x18\x88\xe3\x61\x47\x85\x64\x7f\xc0\xd7\x3f\x1a\x92\x42\xe9\xc7\x1e\x0d\x95\x76\xa7\x51\xa0\x8f\x02\x1b\x46\x9e\x06\x42\xd1\xf2\x01\x07\x02\xde\xe9\x7d\x1a\x0b\xa7\x32\x16\xcc\xc0\xee\xc4\x90\xd2\x5f\x6f\x98\x54\x5d\xf2\x95\xe1\xa7\x69\x10\x3a\x06\xe1\x65\xb3\x17\x47\x58\x78\xd0\x45\xd6\x5b\xd5\x5f\x25\x1d\x71\x49\xa6\x7a\x64\xda\xd0\x6f\xc7\x3a\x4c\xe3\x09\xc0\x6e\x96\x2c\xa7\xa7\x77\x34\x10\x05\x08\x21\x44\x92\x65\x77\xdf\x20\x5c\xbc\xe7\x97\x3f\xf4\x1a\x45\xd6\xe7\x27\x4a\xde\x74\x27\x66\x11\x7d\x70\xba\xd3\x78\xf9\x1e\x0d\xca\xc8\x39\xde\x7c\xb3\xa6\xe1\xbc\xd7\xc1\x6a\x6f\xb3\x0e\x52\xbe\xe4\x98\x8a\x15\x70\x94\x70\x26\x59\xc0\xa2\xf2\x1c\xfb\xd9\xc5\xf9\xbc\xd5\x92\x9c\xa3\xdf\xe6\x1e\xb3\x0d\x49\xba\x87\x50\x5f\x84\xfe\xe9\xd6\xf8\xbb\xe6\xf0\x7a\xeb\xa6\x65\x3b\x86\x8b\x79\x93\xf5\x59\x20\x6e\xb4\xa7\x44\xf4\x3f\xa5\xfe\x67\x42\x12\xdb\xd3\xe7\xbb\xa5\xa3\x8c\x5c\x2b\x97\xbb\xbb\x7f\x8e\xc5\x6e\xed\x43\x5c\xbf\x74\xc8\x8f\xff\xe6\xd6\xbe\x91\xb6\xf5\x95\xe4\xed\x93\xc4\xa8\x5b\xf9\x76\x4d\x35\xb7\xd8\x8c\xb6\x7d\xaf\x72\xe0\xb6\xbd\x01\x63\x9e\x76\xab\x1a\x32\x76\xe4\x8c\x76\xc2\xad\x6c\xa2\x65\xf7\xcf\xf8\xa7\xda\x2a\xb9\x8c\x3d\x3c\xa3\x9d\x64\x33\xe5\x1a\xb5\x2d\xfb\x86\xa2\x5a\x7f\x19\x5b\x7f\xc6\x3f\xd1\x53\xd3\xe2\x41\x5b\xd3\x4f\xf0\xec\xb0\x42\x73\x43\xd2\x68\x27\xd3\x6a\x6a\x34\xf6\x4e\x1e\x52\x8b\x87\x6c\xcc\xae\x44\xfb\x9e\xa7\x51\x4f\x9d\x55\x03\x81\x8e\x67\xfc\xb4\x69\xf0\x3a\x18\xf2\x40\xd0\xf6\xa8\x34\xe3\xc9\x98\xaf\xf6\xda\x24\xd3\xeb\x60\xb9\x0e\xd3\x1f\xa9\xff\xee\x1f\xfd\x37\x00\x00\xff\xff\x69\x5d\x0a\x6a\x39\x9d\x00\x00")
+
+func v2SchemaJSONBytes() ([]byte, error) {
+	return bindataRead(
+		_v2SchemaJSON,
+		"v2/schema.json",
+	)
+}
+
+func v2SchemaJSON() (*asset, error) {
+	bytes, err := v2SchemaJSONBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "v2/schema.json", size: 40249, mode: os.FileMode(0644), modTime: time.Unix(1567900649, 0)}
+	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xcb, 0x25, 0x27, 0xe8, 0x46, 0xae, 0x22, 0xc4, 0xf4, 0x8b, 0x1, 0x32, 0x4d, 0x1f, 0xf8, 0xdf, 0x75, 0x15, 0xc8, 0x2d, 0xc7, 0xed, 0xe, 0x7e, 0x0, 0x75, 0xc0, 0xf9, 0xd2, 0x1f, 0x75, 0x57}}
+	return a, nil
+}
+
+// Asset loads and returns the asset for the given name.
+// It returns an error if the asset could not be found or
+// could not be loaded.
+func Asset(name string) ([]byte, error) {
+	canonicalName := strings.Replace(name, "\\", "/", -1)
+	if f, ok := _bindata[canonicalName]; ok {
+		a, err := f()
+		if err != nil {
+			return nil, fmt.Errorf("Asset %s cannot be read: %v", name, err)
+		}
+		return a.bytes, nil
+	}
+	return nil, fmt.Errorf("Asset %s not found", name)
+}
+
+// AssetString returns the asset contents as a string (instead of a []byte).
+func AssetString(name string) (string, error) {
+	data, err := Asset(name)
+	return string(data), err
+}
+
+// MustAsset is like Asset but panics when Asset would return an error.
+// It simplifies safe initialization of global variables.
+func MustAsset(name string) []byte {
+	a, err := Asset(name)
+	if err != nil {
+		panic("asset: Asset(" + name + "): " + err.Error())
+	}
+
+	return a
+}
+
+// MustAssetString is like AssetString but panics when Asset would return an
+// error. It simplifies safe initialization of global variables.
+func MustAssetString(name string) string {
+	return string(MustAsset(name))
+}
+
+// AssetInfo loads and returns the asset info for the given name.
+// It returns an error if the asset could not be found or
+// could not be loaded.
+func AssetInfo(name string) (os.FileInfo, error) {
+	canonicalName := strings.Replace(name, "\\", "/", -1)
+	if f, ok := _bindata[canonicalName]; ok {
+		a, err := f()
+		if err != nil {
+			return nil, fmt.Errorf("AssetInfo %s cannot be read: %v", name, err)
+		}
+		return a.info, nil
+	}
+	return nil, fmt.Errorf("AssetInfo %s not found", name)
+}
+
+// AssetDigest returns the digest of the file with the given name. It returns an
+// error if the asset could not be found or the digest could not be loaded.
+func AssetDigest(name string) ([sha256.Size]byte, error) {
+	canonicalName := strings.Replace(name, "\\", "/", -1)
+	if f, ok := _bindata[canonicalName]; ok {
+		a, err := f()
+		if err != nil {
+			return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s cannot be read: %v", name, err)
+		}
+		return a.digest, nil
+	}
+	return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name)
+}
+
+// Digests returns a map of all known files and their checksums.
+func Digests() (map[string][sha256.Size]byte, error) {
+	mp := make(map[string][sha256.Size]byte, len(_bindata))
+	for name := range _bindata {
+		a, err := _bindata[name]()
+		if err != nil {
+			return nil, err
+		}
+		mp[name] = a.digest
+	}
+	return mp, nil
+}
+
+// AssetNames returns the names of the assets.
+func AssetNames() []string {
+	names := make([]string, 0, len(_bindata))
+	for name := range _bindata {
+		names = append(names, name)
+	}
+	return names
+}
+
+// _bindata is a table, holding each asset generator, mapped to its name.
+var _bindata = map[string]func() (*asset, error){
+	"jsonschema-draft-04.json": jsonschemaDraft04JSON,
+
+	"v2/schema.json": v2SchemaJSON,
+}
+
+// AssetDir returns the file names below a certain
+// directory embedded in the file by go-bindata.
+// For example if you run go-bindata on data/... and data contains the
+// following hierarchy:
+//     data/
+//       foo.txt
+//       img/
+//         a.png
+//         b.png
+// then AssetDir("data") would return []string{"foo.txt", "img"},
+// AssetDir("data/img") would return []string{"a.png", "b.png"},
+// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and
+// AssetDir("") will return []string{"data"}.
+func AssetDir(name string) ([]string, error) {
+	node := _bintree
+	if len(name) != 0 {
+		canonicalName := strings.Replace(name, "\\", "/", -1)
+		pathList := strings.Split(canonicalName, "/")
+		for _, p := range pathList {
+			node = node.Children[p]
+			if node == nil {
+				return nil, fmt.Errorf("Asset %s not found", name)
+			}
+		}
+	}
+	if node.Func != nil {
+		return nil, fmt.Errorf("Asset %s not found", name)
+	}
+	rv := make([]string, 0, len(node.Children))
+	for childName := range node.Children {
+		rv = append(rv, childName)
+	}
+	return rv, nil
+}
+
+type bintree struct {
+	Func     func() (*asset, error)
+	Children map[string]*bintree
+}
+
+var _bintree = &bintree{nil, map[string]*bintree{
+	"jsonschema-draft-04.json": &bintree{jsonschemaDraft04JSON, map[string]*bintree{}},
+	"v2": &bintree{nil, map[string]*bintree{
+		"schema.json": &bintree{v2SchemaJSON, map[string]*bintree{}},
+	}},
+}}
+
+// RestoreAsset restores an asset under the given directory.
+func RestoreAsset(dir, name string) error {
+	data, err := Asset(name)
+	if err != nil {
+		return err
+	}
+	info, err := AssetInfo(name)
+	if err != nil {
+		return err
+	}
+	err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
+	if err != nil {
+		return err
+	}
+	err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
+	if err != nil {
+		return err
+	}
+	return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
+}
+
+// RestoreAssets restores an asset under the given directory recursively.
+func RestoreAssets(dir, name string) error {
+	children, err := AssetDir(name)
+	// File
+	if err != nil {
+		return RestoreAsset(dir, name)
+	}
+	// Dir
+	for _, child := range children {
+		err = RestoreAssets(dir, filepath.Join(name, child))
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func _filePath(dir, name string) string {
+	canonicalName := strings.Replace(name, "\\", "/", -1)
+	return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...)
+}
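The generated accessors above (Asset, MustAsset, AssetInfo, AssetDigest, AssetDir, RestoreAssets) make up the embedded-asset API produced by go-bindata. A minimal consumer sketch, not part of this patch, assuming the generated file is compiled into the importable spec package:

package main

import (
	"fmt"
	"log"

	"github.com/go-openapi/spec" // assumption: the generated accessors are exported from this package
)

func main() {
	// Enumerate the embedded assets registered in _bindata.
	for _, name := range spec.AssetNames() {
		fmt.Println("embedded:", name)
	}

	// Asset returns the raw bytes, or an error when the name is unknown
	// or the payload cannot be read.
	data, err := spec.Asset("v2/schema.json")
	if err != nil {
		log.Fatalf("asset lookup failed: %v", err)
	}
	fmt.Printf("v2/schema.json is %d bytes\n", len(data))

	// AssetDir walks _bintree: "v2" resolves to the file names below it.
	children, err := spec.AssetDir("v2")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("files under v2/:", children)
}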
diff --git a/vendor/github.com/go-openapi/spec/cache.go b/vendor/github.com/go-openapi/spec/cache.go
new file mode 100644
index 00000000..3fada0da
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/cache.go
@@ -0,0 +1,60 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import "sync"
+
+// ResolutionCache is a cache for resolving URLs
+type ResolutionCache interface {
+	Get(string) (interface{}, bool)
+	Set(string, interface{})
+}
+
+type simpleCache struct {
+	lock  sync.RWMutex
+	store map[string]interface{}
+}
+
+// Get retrieves a cached URI
+func (s *simpleCache) Get(uri string) (interface{}, bool) {
+	debugLog("getting %q from resolution cache", uri)
+	s.lock.RLock()
+	v, ok := s.store[uri]
+	debugLog("got %q from resolution cache: %t", uri, ok)
+
+	s.lock.RUnlock()
+	return v, ok
+}
+
+// Set caches a URI
+func (s *simpleCache) Set(uri string, data interface{}) {
+	s.lock.Lock()
+	s.store[uri] = data
+	s.lock.Unlock()
+}
+
+var resCache ResolutionCache
+
+func init() {
+	resCache = initResolutionCache()
+}
+
+// initResolutionCache initializes the URI resolution cache
+func initResolutionCache() ResolutionCache {
+	return &simpleCache{store: map[string]interface{}{
+		"http://swagger.io/v2/schema.json":       MustLoadSwagger20Schema(),
+		"http://json-schema.org/draft-04/schema": MustLoadJSONSchemaDraft04(),
+	}}
+}
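ResolutionCache is the seam that lets callers plug their own $ref cache into ExpandSchema and the other helpers defined later in this package. A minimal sketch of a caller-provided implementation honoring the same Get/Set contract (illustrative only; the cached URI and value are assumptions):

package main

import (
	"fmt"
	"sync"

	"github.com/go-openapi/spec"
)

// memCache is a tiny caller-side cache satisfying spec.ResolutionCache.
type memCache struct {
	mu    sync.RWMutex
	store map[string]interface{}
}

// Compile-time check that memCache implements the interface.
var _ spec.ResolutionCache = (*memCache)(nil)

func (c *memCache) Get(uri string) (interface{}, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	v, ok := c.store[uri]
	return v, ok
}

func (c *memCache) Set(uri string, data interface{}) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.store[uri] = data
}

func main() {
	c := &memCache{store: map[string]interface{}{}}
	c.Set("http://example.com/spec.json", map[string]interface{}{"swagger": "2.0"}) // hypothetical URI
	if v, ok := c.Get("http://example.com/spec.json"); ok {
		fmt.Printf("cached: %v\n", v)
	}
}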
diff --git a/vendor/github.com/go-openapi/spec/contact_info.go b/vendor/github.com/go-openapi/spec/contact_info.go
new file mode 100644
index 00000000..f285970a
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/contact_info.go
@@ -0,0 +1,24 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+// ContactInfo contact information for the exposed API.
+//
+// For more information: http://goo.gl/8us55a#contactObject
+type ContactInfo struct {
+	Name  string `json:"name,omitempty"`
+	URL   string `json:"url,omitempty"`
+	Email string `json:"email,omitempty"`
+}
diff --git a/vendor/github.com/go-openapi/spec/debug.go b/vendor/github.com/go-openapi/spec/debug.go
new file mode 100644
index 00000000..389c528f
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/debug.go
@@ -0,0 +1,47 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"path/filepath"
+	"runtime"
+)
+
+var (
+	// Debug is true when the SWAGGER_DEBUG env var is not empty.
+	// It enables a more verbose logging of this package.
+	Debug = os.Getenv("SWAGGER_DEBUG") != ""
+	// specLogger is a debug logger for this package
+	specLogger *log.Logger
+)
+
+func init() {
+	debugOptions()
+}
+
+func debugOptions() {
+	specLogger = log.New(os.Stdout, "spec:", log.LstdFlags)
+}
+
+func debugLog(msg string, args ...interface{}) {
+	// A private, trivial trace logger, based on go-openapi/spec/expander.go:debugLog()
+	if Debug {
+		_, file1, pos1, _ := runtime.Caller(1)
+		specLogger.Printf("%s:%d: %s", filepath.Base(file1), pos1, fmt.Sprintf(msg, args...))
+	}
+}
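Debug output is controlled by the SWAGGER_DEBUG environment variable, read once at package initialization; because Debug is exported it can also be toggled in code. A short sketch (illustrative, not part of the vendored file):

package main

import (
	"fmt"
	"os"

	"github.com/go-openapi/spec"
)

func main() {
	// SWAGGER_DEBUG is only consulted when the package initializes, so it
	// must be set before the process starts, e.g. SWAGGER_DEBUG=1 go run .
	fmt.Println("SWAGGER_DEBUG at startup:", os.Getenv("SWAGGER_DEBUG"))

	// Debug is an exported package variable, so it can also be flipped at
	// runtime for targeted troubleshooting of $ref resolution.
	spec.Debug = true
}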
diff --git a/vendor/github.com/go-openapi/spec/expander.go b/vendor/github.com/go-openapi/spec/expander.go
new file mode 100644
index 00000000..1e7fc8c4
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/expander.go
@@ -0,0 +1,650 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
+// ExpandOptions provides options for spec expansion
+type ExpandOptions struct {
+	RelativeBase        string
+	SkipSchemas         bool
+	ContinueOnError     bool
+	AbsoluteCircularRef bool
+}
+
+// ResolveRefWithBase resolves a reference against a context root with preservation of base path
+func ResolveRefWithBase(root interface{}, ref *Ref, opts *ExpandOptions) (*Schema, error) {
+	resolver, err := defaultSchemaLoader(root, opts, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+	specBasePath := ""
+	if opts != nil && opts.RelativeBase != "" {
+		specBasePath, _ = absPath(opts.RelativeBase)
+	}
+
+	result := new(Schema)
+	if err := resolver.Resolve(ref, result, specBasePath); err != nil {
+		return nil, err
+	}
+	return result, nil
+}
+
+// ResolveRef resolves a reference against a context root
+// ref is guaranteed to be in root (no need to go to external files)
+// ResolveRef is ONLY called from the code generation module
+func ResolveRef(root interface{}, ref *Ref) (*Schema, error) {
+	res, _, err := ref.GetPointer().Get(root)
+	if err != nil {
+		panic(err)
+	}
+	switch sch := res.(type) {
+	case Schema:
+		return &sch, nil
+	case *Schema:
+		return sch, nil
+	case map[string]interface{}:
+		b, _ := json.Marshal(sch)
+		newSch := new(Schema)
+		_ = json.Unmarshal(b, newSch)
+		return newSch, nil
+	default:
+		return nil, fmt.Errorf("unknown type for the resolved reference")
+	}
+}
+
+// ResolveParameter resolves a parameter reference against a context root
+func ResolveParameter(root interface{}, ref Ref) (*Parameter, error) {
+	return ResolveParameterWithBase(root, ref, nil)
+}
+
+// ResolveParameterWithBase resolves a parameter reference against a context root and base path
+func ResolveParameterWithBase(root interface{}, ref Ref, opts *ExpandOptions) (*Parameter, error) {
+	resolver, err := defaultSchemaLoader(root, opts, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	result := new(Parameter)
+	if err := resolver.Resolve(&ref, result, ""); err != nil {
+		return nil, err
+	}
+	return result, nil
+}
+
+// ResolveResponse resolves a response reference against a context root
+func ResolveResponse(root interface{}, ref Ref) (*Response, error) {
+	return ResolveResponseWithBase(root, ref, nil)
+}
+
+// ResolveResponseWithBase resolves a response reference against a context root and base path
+func ResolveResponseWithBase(root interface{}, ref Ref, opts *ExpandOptions) (*Response, error) {
+	resolver, err := defaultSchemaLoader(root, opts, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	result := new(Response)
+	if err := resolver.Resolve(&ref, result, ""); err != nil {
+		return nil, err
+	}
+	return result, nil
+}
+
+// ResolveItems resolves parameter items reference against a context root and base path.
+//
+// NOTE: strictly speaking, this construct is not supported by Swagger 2.0.
+// Similarly, $ref are forbidden in response headers.
+func ResolveItems(root interface{}, ref Ref, opts *ExpandOptions) (*Items, error) {
+	resolver, err := defaultSchemaLoader(root, opts, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+	basePath := ""
+	if opts.RelativeBase != "" {
+		basePath = opts.RelativeBase
+	}
+	result := new(Items)
+	if err := resolver.Resolve(&ref, result, basePath); err != nil {
+		return nil, err
+	}
+	return result, nil
+}
+
+// ResolvePathItem resolves a path item against a context root and base path
+func ResolvePathItem(root interface{}, ref Ref, opts *ExpandOptions) (*PathItem, error) {
+	resolver, err := defaultSchemaLoader(root, opts, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+	basePath := ""
+	if opts.RelativeBase != "" {
+		basePath = opts.RelativeBase
+	}
+	result := new(PathItem)
+	if err := resolver.Resolve(&ref, result, basePath); err != nil {
+		return nil, err
+	}
+	return result, nil
+}
+
+// ExpandSpec expands the references in a swagger spec
+func ExpandSpec(spec *Swagger, options *ExpandOptions) error {
+	resolver, err := defaultSchemaLoader(spec, options, nil, nil)
+	// Just in case this ever returns an error.
+	if resolver.shouldStopOnError(err) {
+		return err
+	}
+
+	// getting the base path of the spec to adjust all subsequent reference resolutions
+	specBasePath := ""
+	if options != nil && options.RelativeBase != "" {
+		specBasePath, _ = absPath(options.RelativeBase)
+	}
+
+	if options == nil || !options.SkipSchemas {
+		for key, definition := range spec.Definitions {
+			var def *Schema
+			var err error
+			if def, err = expandSchema(definition, []string{fmt.Sprintf("#/definitions/%s", key)}, resolver, specBasePath); resolver.shouldStopOnError(err) {
+				return err
+			}
+			if def != nil {
+				spec.Definitions[key] = *def
+			}
+		}
+	}
+
+	for key := range spec.Parameters {
+		parameter := spec.Parameters[key]
+		if err := expandParameterOrResponse(&parameter, resolver, specBasePath); resolver.shouldStopOnError(err) {
+			return err
+		}
+		spec.Parameters[key] = parameter
+	}
+
+	for key := range spec.Responses {
+		response := spec.Responses[key]
+		if err := expandParameterOrResponse(&response, resolver, specBasePath); resolver.shouldStopOnError(err) {
+			return err
+		}
+		spec.Responses[key] = response
+	}
+
+	if spec.Paths != nil {
+		for key := range spec.Paths.Paths {
+			path := spec.Paths.Paths[key]
+			if err := expandPathItem(&path, resolver, specBasePath); resolver.shouldStopOnError(err) {
+				return err
+			}
+			spec.Paths.Paths[key] = path
+		}
+	}
+
+	return nil
+}
+
+// baseForRoot loads in the cache the root document and produces a fake "root" base path entry
+// for further $ref resolution
+func baseForRoot(root interface{}, cache ResolutionCache) string {
+	// cache the root document to resolve $ref's
+	const rootBase = "root"
+	if root != nil {
+		base, _ := absPath(rootBase)
+		normalizedBase := normalizeAbsPath(base)
+		debugLog("setting root doc in cache at: %s", normalizedBase)
+		if cache == nil {
+			cache = resCache
+		}
+		cache.Set(normalizedBase, root)
+		return rootBase
+	}
+	return ""
+}
+
+// ExpandSchema expands the refs in the schema object with reference to the root object
+// go-openapi/validate uses this function
+// note that it is impossible to reference a JSON schema in a file other than the root document
+func ExpandSchema(schema *Schema, root interface{}, cache ResolutionCache) error {
+	opts := &ExpandOptions{
+		// when a root is specified, cache the root as an in-memory document for $ref retrieval
+		RelativeBase:    baseForRoot(root, cache),
+		SkipSchemas:     false,
+		ContinueOnError: false,
+		// when no base path is specified, remaining $ref (circular) are rendered with an absolute path
+		AbsoluteCircularRef: true,
+	}
+	return ExpandSchemaWithBasePath(schema, cache, opts)
+}
+
+// ExpandSchemaWithBasePath expands the refs in the schema object, base path configured through expand options
+func ExpandSchemaWithBasePath(schema *Schema, cache ResolutionCache, opts *ExpandOptions) error {
+	if schema == nil {
+		return nil
+	}
+
+	var basePath string
+	if opts.RelativeBase != "" {
+		basePath, _ = absPath(opts.RelativeBase)
+	}
+
+	resolver, err := defaultSchemaLoader(nil, opts, cache, nil)
+	if err != nil {
+		return err
+	}
+
+	refs := []string{""}
+	var s *Schema
+	if s, err = expandSchema(*schema, refs, resolver, basePath); err != nil {
+		return err
+	}
+	*schema = *s
+	return nil
+}
+
+func expandItems(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) {
+	if target.Items != nil {
+		if target.Items.Schema != nil {
+			t, err := expandSchema(*target.Items.Schema, parentRefs, resolver, basePath)
+			if err != nil {
+				return nil, err
+			}
+			*target.Items.Schema = *t
+		}
+		for i := range target.Items.Schemas {
+			t, err := expandSchema(target.Items.Schemas[i], parentRefs, resolver, basePath)
+			if err != nil {
+				return nil, err
+			}
+			target.Items.Schemas[i] = *t
+		}
+	}
+	return &target, nil
+}
+
+func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) {
+	if target.Ref.String() == "" && target.Ref.IsRoot() {
+		// normalizing is important
+		newRef := normalizeFileRef(&target.Ref, basePath)
+		target.Ref = *newRef
+		return &target, nil
+
+	}
+
+	// change the base path of resolution when an ID is encountered
+	// otherwise the basePath should inherit the parent's
+	// important: ID can be relative path
+	if target.ID != "" {
+		debugLog("schema has ID: %s", target.ID)
+		// handling the case when id is a folder
+		// remember that basePath has to be a file
+		refPath := target.ID
+		if strings.HasSuffix(target.ID, "/") {
+			// path.Clean here would not work correctly if basepath is http
+			refPath = fmt.Sprintf("%s%s", refPath, "placeholder.json")
+		}
+		basePath = normalizePaths(refPath, basePath)
+	}
+
+	var t *Schema
+	// if Ref is found, everything else doesn't matter
+	// Ref also changes the resolution scope of children expandSchema
+	if target.Ref.String() != "" {
+		// here the resolution scope is changed because a $ref was encountered
+		normalizedRef := normalizeFileRef(&target.Ref, basePath)
+		normalizedBasePath := normalizedRef.RemoteURI()
+
+		if resolver.isCircular(normalizedRef, basePath, parentRefs...) {
+			// this means there is a cycle in the recursion tree: return the Ref
+			// - circular refs cannot be expanded. We leave them as ref.
+			// - denormalization means that a new local file ref is set relative to the original basePath
+			debugLog("shortcut circular ref: basePath: %s, normalizedPath: %s, normalized ref: %s",
+				basePath, normalizedBasePath, normalizedRef.String())
+			if !resolver.options.AbsoluteCircularRef {
+				target.Ref = *denormalizeFileRef(normalizedRef, normalizedBasePath, resolver.context.basePath)
+			} else {
+				target.Ref = *normalizedRef
+			}
+			return &target, nil
+		}
+
+		debugLog("basePath: %s: calling Resolve with target: %#v", basePath, target)
+		if err := resolver.Resolve(&target.Ref, &t, basePath); resolver.shouldStopOnError(err) {
+			return nil, err
+		}
+
+		if t != nil {
+			parentRefs = append(parentRefs, normalizedRef.String())
+			var err error
+			transitiveResolver, err := resolver.transitiveResolver(basePath, target.Ref)
+			if transitiveResolver.shouldStopOnError(err) {
+				return nil, err
+			}
+
+			basePath = resolver.updateBasePath(transitiveResolver, normalizedBasePath)
+
+			return expandSchema(*t, parentRefs, transitiveResolver, basePath)
+		}
+	}
+
+	t, err := expandItems(target, parentRefs, resolver, basePath)
+	if resolver.shouldStopOnError(err) {
+		return &target, err
+	}
+	if t != nil {
+		target = *t
+	}
+
+	for i := range target.AllOf {
+		t, err := expandSchema(target.AllOf[i], parentRefs, resolver, basePath)
+		if resolver.shouldStopOnError(err) {
+			return &target, err
+		}
+		target.AllOf[i] = *t
+	}
+	for i := range target.AnyOf {
+		t, err := expandSchema(target.AnyOf[i], parentRefs, resolver, basePath)
+		if resolver.shouldStopOnError(err) {
+			return &target, err
+		}
+		target.AnyOf[i] = *t
+	}
+	for i := range target.OneOf {
+		t, err := expandSchema(target.OneOf[i], parentRefs, resolver, basePath)
+		if resolver.shouldStopOnError(err) {
+			return &target, err
+		}
+		if t != nil {
+			target.OneOf[i] = *t
+		}
+	}
+	if target.Not != nil {
+		t, err := expandSchema(*target.Not, parentRefs, resolver, basePath)
+		if resolver.shouldStopOnError(err) {
+			return &target, err
+		}
+		if t != nil {
+			*target.Not = *t
+		}
+	}
+	for k := range target.Properties {
+		t, err := expandSchema(target.Properties[k], parentRefs, resolver, basePath)
+		if resolver.shouldStopOnError(err) {
+			return &target, err
+		}
+		if t != nil {
+			target.Properties[k] = *t
+		}
+	}
+	if target.AdditionalProperties != nil && target.AdditionalProperties.Schema != nil {
+		t, err := expandSchema(*target.AdditionalProperties.Schema, parentRefs, resolver, basePath)
+		if resolver.shouldStopOnError(err) {
+			return &target, err
+		}
+		if t != nil {
+			*target.AdditionalProperties.Schema = *t
+		}
+	}
+	for k := range target.PatternProperties {
+		t, err := expandSchema(target.PatternProperties[k], parentRefs, resolver, basePath)
+		if resolver.shouldStopOnError(err) {
+			return &target, err
+		}
+		if t != nil {
+			target.PatternProperties[k] = *t
+		}
+	}
+	for k := range target.Dependencies {
+		if target.Dependencies[k].Schema != nil {
+			t, err := expandSchema(*target.Dependencies[k].Schema, parentRefs, resolver, basePath)
+			if resolver.shouldStopOnError(err) {
+				return &target, err
+			}
+			if t != nil {
+				*target.Dependencies[k].Schema = *t
+			}
+		}
+	}
+	if target.AdditionalItems != nil && target.AdditionalItems.Schema != nil {
+		t, err := expandSchema(*target.AdditionalItems.Schema, parentRefs, resolver, basePath)
+		if resolver.shouldStopOnError(err) {
+			return &target, err
+		}
+		if t != nil {
+			*target.AdditionalItems.Schema = *t
+		}
+	}
+	for k := range target.Definitions {
+		t, err := expandSchema(target.Definitions[k], parentRefs, resolver, basePath)
+		if resolver.shouldStopOnError(err) {
+			return &target, err
+		}
+		if t != nil {
+			target.Definitions[k] = *t
+		}
+	}
+	return &target, nil
+}
+
+func expandPathItem(pathItem *PathItem, resolver *schemaLoader, basePath string) error {
+	if pathItem == nil {
+		return nil
+	}
+
+	parentRefs := []string{}
+	if err := resolver.deref(pathItem, parentRefs, basePath); resolver.shouldStopOnError(err) {
+		return err
+	}
+	if pathItem.Ref.String() != "" {
+		var err error
+		resolver, err = resolver.transitiveResolver(basePath, pathItem.Ref)
+		if resolver.shouldStopOnError(err) {
+			return err
+		}
+	}
+	pathItem.Ref = Ref{}
+
+	for idx := range pathItem.Parameters {
+		if err := expandParameterOrResponse(&(pathItem.Parameters[idx]), resolver, basePath); resolver.shouldStopOnError(err) {
+			return err
+		}
+	}
+	ops := []*Operation{
+		pathItem.Get,
+		pathItem.Head,
+		pathItem.Options,
+		pathItem.Put,
+		pathItem.Post,
+		pathItem.Patch,
+		pathItem.Delete,
+	}
+	for _, op := range ops {
+		if err := expandOperation(op, resolver, basePath); resolver.shouldStopOnError(err) {
+			return err
+		}
+	}
+	return nil
+}
+
+func expandOperation(op *Operation, resolver *schemaLoader, basePath string) error {
+	if op == nil {
+		return nil
+	}
+
+	for i := range op.Parameters {
+		param := op.Parameters[i]
+		if err := expandParameterOrResponse(&param, resolver, basePath); resolver.shouldStopOnError(err) {
+			return err
+		}
+		op.Parameters[i] = param
+	}
+
+	if op.Responses != nil {
+		responses := op.Responses
+		if err := expandParameterOrResponse(responses.Default, resolver, basePath); resolver.shouldStopOnError(err) {
+			return err
+		}
+		for code := range responses.StatusCodeResponses {
+			response := responses.StatusCodeResponses[code]
+			if err := expandParameterOrResponse(&response, resolver, basePath); resolver.shouldStopOnError(err) {
+				return err
+			}
+			responses.StatusCodeResponses[code] = response
+		}
+	}
+	return nil
+}
+
+// ExpandResponseWithRoot expands a response based on a root document, not a fetchable document
+func ExpandResponseWithRoot(response *Response, root interface{}, cache ResolutionCache) error {
+	opts := &ExpandOptions{
+		RelativeBase:    baseForRoot(root, cache),
+		SkipSchemas:     false,
+		ContinueOnError: false,
+		// when no base path is specified, remaining $ref (circular) are rendered with an absolute path
+		AbsoluteCircularRef: true,
+	}
+	resolver, err := defaultSchemaLoader(root, opts, nil, nil)
+	if err != nil {
+		return err
+	}
+
+	return expandParameterOrResponse(response, resolver, opts.RelativeBase)
+}
+
+// ExpandResponse expands a response based on a basepath
+// This is the exported version of expandResponse
+// all refs inside response will be resolved relative to basePath
+func ExpandResponse(response *Response, basePath string) error {
+	var specBasePath string
+	if basePath != "" {
+		specBasePath, _ = absPath(basePath)
+	}
+	opts := &ExpandOptions{
+		RelativeBase: specBasePath,
+	}
+	resolver, err := defaultSchemaLoader(nil, opts, nil, nil)
+	if err != nil {
+		return err
+	}
+
+	return expandParameterOrResponse(response, resolver, opts.RelativeBase)
+}
+
+// ExpandParameterWithRoot expands a parameter based on a root document, not a fetchable document
+func ExpandParameterWithRoot(parameter *Parameter, root interface{}, cache ResolutionCache) error {
+	opts := &ExpandOptions{
+		RelativeBase:    baseForRoot(root, cache),
+		SkipSchemas:     false,
+		ContinueOnError: false,
+		// when no base path is specified, remaining $ref (circular) are rendered with an absolute path
+		AbsoluteCircularRef: true,
+	}
+	resolver, err := defaultSchemaLoader(root, opts, nil, nil)
+	if err != nil {
+		return err
+	}
+
+	return expandParameterOrResponse(parameter, resolver, opts.RelativeBase)
+}
+
+// ExpandParameter expands a parameter based on a basepath.
+// This is the exported version of expandParameter
+// all refs inside parameter will be resolved relative to basePath
+func ExpandParameter(parameter *Parameter, basePath string) error {
+	var specBasePath string
+	if basePath != "" {
+		specBasePath, _ = absPath(basePath)
+	}
+	opts := &ExpandOptions{
+		RelativeBase: specBasePath,
+	}
+	resolver, err := defaultSchemaLoader(nil, opts, nil, nil)
+	if err != nil {
+		return err
+	}
+
+	return expandParameterOrResponse(parameter, resolver, opts.RelativeBase)
+}
+
+func getRefAndSchema(input interface{}) (*Ref, *Schema, error) {
+	var ref *Ref
+	var sch *Schema
+	switch refable := input.(type) {
+	case *Parameter:
+		if refable == nil {
+			return nil, nil, nil
+		}
+		ref = &refable.Ref
+		sch = refable.Schema
+	case *Response:
+		if refable == nil {
+			return nil, nil, nil
+		}
+		ref = &refable.Ref
+		sch = refable.Schema
+	default:
+		return nil, nil, fmt.Errorf("expand: unsupported type %T. Input should be of type *Parameter or *Response", input)
+	}
+	return ref, sch, nil
+}
+
+func expandParameterOrResponse(input interface{}, resolver *schemaLoader, basePath string) error {
+	ref, _, err := getRefAndSchema(input)
+	if err != nil {
+		return err
+	}
+	if ref == nil {
+		return nil
+	}
+	parentRefs := []string{}
+	if err := resolver.deref(input, parentRefs, basePath); resolver.shouldStopOnError(err) {
+		return err
+	}
+	ref, sch, _ := getRefAndSchema(input)
+	if ref.String() != "" {
+		transitiveResolver, err := resolver.transitiveResolver(basePath, *ref)
+		if transitiveResolver.shouldStopOnError(err) {
+			return err
+		}
+		basePath = resolver.updateBasePath(transitiveResolver, basePath)
+		resolver = transitiveResolver
+	}
+
+	if sch != nil && sch.Ref.String() != "" {
+		// schema expanded to a $ref in another root
+		var ern error
+		sch.Ref, ern = NewRef(normalizePaths(sch.Ref.String(), ref.RemoteURI()))
+		if ern != nil {
+			return ern
+		}
+	}
+	if ref != nil {
+		*ref = Ref{}
+	}
+
+	if !resolver.options.SkipSchemas && sch != nil {
+		s, err := expandSchema(*sch, parentRefs, resolver, basePath)
+		if resolver.shouldStopOnError(err) {
+			return err
+		}
+		*sch = *s
+	}
+	return nil
+}
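ExpandSpec and the ExpandSchema/ExpandResponse/ExpandParameter helpers above are the entry points most consumers use. A minimal usage sketch (illustrative; the swagger.json file name is an assumption):

package main

import (
	"encoding/json"
	"io/ioutil"
	"log"
	"os"

	"github.com/go-openapi/spec"
)

func main() {
	// Load a Swagger 2.0 document from disk.
	raw, err := ioutil.ReadFile("swagger.json")
	if err != nil {
		log.Fatal(err)
	}
	var doc spec.Swagger
	if err := json.Unmarshal(raw, &doc); err != nil {
		log.Fatal(err)
	}

	// RelativeBase lets relative $ref values resolve against the file's
	// location; SkipSchemas would leave definition bodies unexpanded.
	opts := &spec.ExpandOptions{
		RelativeBase:    "swagger.json",
		SkipSchemas:     false,
		ContinueOnError: false,
	}
	if err := spec.ExpandSpec(&doc, opts); err != nil {
		log.Fatal(err)
	}

	expanded, _ := json.MarshalIndent(doc, "", "  ")
	os.Stdout.Write(expanded)
}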
diff --git a/vendor/github.com/go-openapi/spec/external_docs.go b/vendor/github.com/go-openapi/spec/external_docs.go
new file mode 100644
index 00000000..88add91b
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/external_docs.go
@@ -0,0 +1,24 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+// ExternalDocumentation allows referencing an external resource for
+// extended documentation.
+//
+// For more information: http://goo.gl/8us55a#externalDocumentationObject
+type ExternalDocumentation struct {
+	Description string `json:"description,omitempty"`
+	URL         string `json:"url,omitempty"`
+}
diff --git a/vendor/github.com/go-openapi/spec/go.mod b/vendor/github.com/go-openapi/spec/go.mod
new file mode 100644
index 00000000..02a142c0
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/go.mod
@@ -0,0 +1,17 @@
+module github.com/go-openapi/spec
+
+require (
+	github.com/go-openapi/jsonpointer v0.19.3
+	github.com/go-openapi/jsonreference v0.19.2
+	github.com/go-openapi/swag v0.19.5
+	github.com/kr/pty v1.1.5 // indirect
+	github.com/stretchr/objx v0.2.0 // indirect
+	github.com/stretchr/testify v1.3.0
+	golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 // indirect
+	golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 // indirect
+	golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f // indirect
+	golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59 // indirect
+	gopkg.in/yaml.v2 v2.2.2
+)
+
+go 1.13
diff --git a/vendor/github.com/go-openapi/spec/go.sum b/vendor/github.com/go-openapi/spec/go.sum
new file mode 100644
index 00000000..86db601c
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/go.sum
@@ -0,0 +1,74 @@
+github.com/PuerkitoBio/purell v1.1.0 h1:rmGxhojJlM0tuKtfdvliR84CFHljx9ag64t2xmVkjK4=
+github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
+github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
+github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/go-openapi/jsonpointer v0.17.0 h1:nH6xp8XdXHx8dqveo0ZuJBluCO2qGrPbDNZ0dwoRHP0=
+github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
+github.com/go-openapi/jsonpointer v0.19.0 h1:FTUMcX77w5rQkClIzDtTxvn6Bsa894CcrzNj2MMfeg8=
+github.com/go-openapi/jsonpointer v0.19.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
+github.com/go-openapi/jsonpointer v0.19.2 h1:A9+F4Dc/MCNB5jibxf6rRvOvR/iFgQdyNx9eIhnGqq0=
+github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
+github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w=
+github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonreference v0.19.0 h1:BqWKpV1dFd+AuiKlgtddwVIFQsuMpxfBDBHGfM2yNpk=
+github.com/go-openapi/jsonreference v0.19.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
+github.com/go-openapi/jsonreference v0.19.2 h1:o20suLFB4Ri0tuzpWtyHlh7E7HnkqTNLq6aR6WVNS1w=
+github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
+github.com/go-openapi/swag v0.17.0 h1:iqrgMg7Q7SvtbWLlltPrkMs0UBJI6oTSs79JFRUi880=
+github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
+github.com/go-openapi/swag v0.19.2 h1:jvO6bCMBEilGwMfHhrd61zIID4oIFdwb76V17SM88dE=
+github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY=
+github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 h1:2gxZ0XQIU/5z3Z3bUBu+FXuk2pFbkN6tcwi/pjyaDic=
+github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4=
+github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8=
+github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
+github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/net v0.0.0-20181005035420-146acd28ed58 h1:otZG8yDCO4LVps5+9bxOeNiCvgmOyt96J3roHTYs7oE=
+golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM=
+golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/go-openapi/spec/header.go b/vendor/github.com/go-openapi/spec/header.go
new file mode 100644
index 00000000..39efe452
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/header.go
@@ -0,0 +1,197 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+	"encoding/json"
+	"strings"
+
+	"github.com/go-openapi/jsonpointer"
+	"github.com/go-openapi/swag"
+)
+
+const (
+	jsonArray = "array"
+)
+
+// HeaderProps describes a response header
+type HeaderProps struct {
+	Description string `json:"description,omitempty"`
+}
+
+// Header describes a header for a response of the API
+//
+// For more information: http://goo.gl/8us55a#headerObject
+type Header struct {
+	CommonValidations
+	SimpleSchema
+	VendorExtensible
+	HeaderProps
+}
+
+// ResponseHeader creates a new header instance for use in a response
+func ResponseHeader() *Header {
+	return new(Header)
+}
+
+// WithDescription sets the description on this response, allows for chaining
+func (h *Header) WithDescription(description string) *Header {
+	h.Description = description
+	return h
+}
+
+// Typed a fluent builder method for the type of the header
+func (h *Header) Typed(tpe, format string) *Header {
+	h.Type = tpe
+	h.Format = format
+	return h
+}
+
+// CollectionOf a fluent builder method for an array item
+func (h *Header) CollectionOf(items *Items, format string) *Header {
+	h.Type = jsonArray
+	h.Items = items
+	h.CollectionFormat = format
+	return h
+}
+
+// WithDefault sets the default value on this item
+func (h *Header) WithDefault(defaultValue interface{}) *Header {
+	h.Default = defaultValue
+	return h
+}
+
+// WithMaxLength sets a max length value
+func (h *Header) WithMaxLength(max int64) *Header {
+	h.MaxLength = &max
+	return h
+}
+
+// WithMinLength sets a min length value
+func (h *Header) WithMinLength(min int64) *Header {
+	h.MinLength = &min
+	return h
+}
+
+// WithPattern sets a pattern value
+func (h *Header) WithPattern(pattern string) *Header {
+	h.Pattern = pattern
+	return h
+}
+
+// WithMultipleOf sets a multiple of value
+func (h *Header) WithMultipleOf(number float64) *Header {
+	h.MultipleOf = &number
+	return h
+}
+
+// WithMaximum sets a maximum number value
+func (h *Header) WithMaximum(max float64, exclusive bool) *Header {
+	h.Maximum = &max
+	h.ExclusiveMaximum = exclusive
+	return h
+}
+
+// WithMinimum sets a minimum number value
+func (h *Header) WithMinimum(min float64, exclusive bool) *Header {
+	h.Minimum = &min
+	h.ExclusiveMinimum = exclusive
+	return h
+}
+
+// WithEnum sets the enum values (replace)
+func (h *Header) WithEnum(values ...interface{}) *Header {
+	h.Enum = append([]interface{}{}, values...)
+	return h
+}
+
+// WithMaxItems sets the max items
+func (h *Header) WithMaxItems(size int64) *Header {
+	h.MaxItems = &size
+	return h
+}
+
+// WithMinItems sets the min items
+func (h *Header) WithMinItems(size int64) *Header {
+	h.MinItems = &size
+	return h
+}
+
+// UniqueValues dictates that this array can only have unique items
+func (h *Header) UniqueValues() *Header {
+	h.UniqueItems = true
+	return h
+}
+
+// AllowDuplicates this array can have duplicates
+func (h *Header) AllowDuplicates() *Header {
+	h.UniqueItems = false
+	return h
+}
+
+// MarshalJSON marshals this header to JSON
+func (h Header) MarshalJSON() ([]byte, error) {
+	b1, err := json.Marshal(h.CommonValidations)
+	if err != nil {
+		return nil, err
+	}
+	b2, err := json.Marshal(h.SimpleSchema)
+	if err != nil {
+		return nil, err
+	}
+	b3, err := json.Marshal(h.HeaderProps)
+	if err != nil {
+		return nil, err
+	}
+	return swag.ConcatJSON(b1, b2, b3), nil
+}
+
+// UnmarshalJSON unmarshals this header from JSON
+func (h *Header) UnmarshalJSON(data []byte) error {
+	if err := json.Unmarshal(data, &h.CommonValidations); err != nil {
+		return err
+	}
+	if err := json.Unmarshal(data, &h.SimpleSchema); err != nil {
+		return err
+	}
+	if err := json.Unmarshal(data, &h.VendorExtensible); err != nil {
+		return err
+	}
+	return json.Unmarshal(data, &h.HeaderProps)
+}
+
+// JSONLookup look up a value by the json property name
+func (h Header) JSONLookup(token string) (interface{}, error) {
+	if ex, ok := h.Extensions[token]; ok {
+		return &ex, nil
+	}
+
+	r, _, err := jsonpointer.GetForToken(h.CommonValidations, token)
+	if err != nil && !strings.HasPrefix(err.Error(), "object has no field") {
+		return nil, err
+	}
+	if r != nil {
+		return r, nil
+	}
+	r, _, err = jsonpointer.GetForToken(h.SimpleSchema, token)
+	if err != nil && !strings.HasPrefix(err.Error(), "object has no field") {
+		return nil, err
+	}
+	if r != nil {
+		return r, nil
+	}
+	r, _, err = jsonpointer.GetForToken(h.HeaderProps, token)
+	return r, err
+}
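The Header builder methods above all return the receiver, so they chain. A short sketch of the fluent style and the resulting JSON (illustrative; the header values are assumptions):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// Build a typed, bounded response header with the fluent helpers.
	h := spec.ResponseHeader().
		WithDescription("number of results per page").
		Typed("integer", "int32").
		WithMinimum(1, false).
		WithMaximum(500, false).
		WithDefault(100)

	out, err := json.Marshal(h)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}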
diff --git a/vendor/github.com/go-openapi/spec/info.go b/vendor/github.com/go-openapi/spec/info.go
new file mode 100644
index 00000000..c458b49b
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/info.go
@@ -0,0 +1,165 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+	"encoding/json"
+	"strings"
+
+	"github.com/go-openapi/jsonpointer"
+	"github.com/go-openapi/swag"
+)
+
+// Extensions vendor specific extensions
+type Extensions map[string]interface{}
+
+// Add adds a value to these extensions
+func (e Extensions) Add(key string, value interface{}) {
+	realKey := strings.ToLower(key)
+	e[realKey] = value
+}
+
+// GetString gets a string value from the extensions
+func (e Extensions) GetString(key string) (string, bool) {
+	if v, ok := e[strings.ToLower(key)]; ok {
+		str, ok := v.(string)
+		return str, ok
+	}
+	return "", false
+}
+
+// GetBool gets a bool value from the extensions
+func (e Extensions) GetBool(key string) (bool, bool) {
+	if v, ok := e[strings.ToLower(key)]; ok {
+		str, ok := v.(bool)
+		return str, ok
+	}
+	return false, false
+}
+
+// GetStringSlice gets a string slice value from the extensions
+func (e Extensions) GetStringSlice(key string) ([]string, bool) {
+	if v, ok := e[strings.ToLower(key)]; ok {
+		arr, isSlice := v.([]interface{})
+		if !isSlice {
+			return nil, false
+		}
+		var strs []string
+		for _, iface := range arr {
+			str, isString := iface.(string)
+			if !isString {
+				return nil, false
+			}
+			strs = append(strs, str)
+		}
+		return strs, ok
+	}
+	return nil, false
+}
+
+// VendorExtensible composition block.
+type VendorExtensible struct {
+	Extensions Extensions
+}
+
+// AddExtension adds an extension to this extensible object
+func (v *VendorExtensible) AddExtension(key string, value interface{}) {
+	if value == nil {
+		return
+	}
+	if v.Extensions == nil {
+		v.Extensions = make(map[string]interface{})
+	}
+	v.Extensions.Add(key, value)
+}
+
+// MarshalJSON marshals the extensions to json
+func (v VendorExtensible) MarshalJSON() ([]byte, error) {
+	toser := make(map[string]interface{})
+	for k, v := range v.Extensions {
+		lk := strings.ToLower(k)
+		if strings.HasPrefix(lk, "x-") {
+			toser[k] = v
+		}
+	}
+	return json.Marshal(toser)
+}
+
+// UnmarshalJSON for this extensible object
+func (v *VendorExtensible) UnmarshalJSON(data []byte) error {
+	var d map[string]interface{}
+	if err := json.Unmarshal(data, &d); err != nil {
+		return err
+	}
+	for k, vv := range d {
+		lk := strings.ToLower(k)
+		if strings.HasPrefix(lk, "x-") {
+			if v.Extensions == nil {
+				v.Extensions = map[string]interface{}{}
+			}
+			v.Extensions[k] = vv
+		}
+	}
+	return nil
+}
+
+// InfoProps the properties for an info definition
+type InfoProps struct {
+	Description    string       `json:"description,omitempty"`
+	Title          string       `json:"title,omitempty"`
+	TermsOfService string       `json:"termsOfService,omitempty"`
+	Contact        *ContactInfo `json:"contact,omitempty"`
+	License        *License     `json:"license,omitempty"`
+	Version        string       `json:"version,omitempty"`
+}
+
+// Info object provides metadata about the API.
+// The metadata can be used by the clients if needed, and can be presented in the Swagger-UI for convenience.
+//
+// For more information: http://goo.gl/8us55a#infoObject
+type Info struct {
+	VendorExtensible
+	InfoProps
+}
+
+// JSONLookup look up a value by the json property name
+func (i Info) JSONLookup(token string) (interface{}, error) {
+	if ex, ok := i.Extensions[token]; ok {
+		return &ex, nil
+	}
+	r, _, err := jsonpointer.GetForToken(i.InfoProps, token)
+	return r, err
+}
+
+// MarshalJSON marshals this info object to JSON
+func (i Info) MarshalJSON() ([]byte, error) {
+	b1, err := json.Marshal(i.InfoProps)
+	if err != nil {
+		return nil, err
+	}
+	b2, err := json.Marshal(i.VendorExtensible)
+	if err != nil {
+		return nil, err
+	}
+	return swag.ConcatJSON(b1, b2), nil
+}
+
+// UnmarshalJSON unmarshals this info object from JSON
+func (i *Info) UnmarshalJSON(data []byte) error {
+	if err := json.Unmarshal(data, &i.InfoProps); err != nil {
+		return err
+	}
+	return json.Unmarshal(data, &i.VendorExtensible)
+}
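VendorExtensible and the Extensions helpers above back the x-* extension handling shared by all spec types. A small sketch of adding and reading back an extension (illustrative; the extension key and values are assumptions):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	info := spec.Info{
		InfoProps: spec.InfoProps{
			Title:   "Pet Store",
			Version: "1.0.0",
		},
	}

	// AddExtension lower-cases the key; only x-* keys survive MarshalJSON.
	info.AddExtension("x-audience", "internal")

	b, _ := json.Marshal(info)
	fmt.Println(string(b))

	// GetString is one of the typed lookup helpers on Extensions.
	if v, ok := info.Extensions.GetString("x-audience"); ok {
		fmt.Println("audience:", v)
	}
}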
diff --git a/vendor/github.com/go-openapi/spec/items.go b/vendor/github.com/go-openapi/spec/items.go
new file mode 100644
index 00000000..365d1631
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/items.go
@@ -0,0 +1,244 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+	"encoding/json"
+	"strings"
+
+	"github.com/go-openapi/jsonpointer"
+	"github.com/go-openapi/swag"
+)
+
+const (
+	jsonRef = "$ref"
+)
+
+// SimpleSchema describes swagger simple schemas for parameters and headers
+type SimpleSchema struct {
+	Type             string      `json:"type,omitempty"`
+	Nullable         bool        `json:"nullable,omitempty"`
+	Format           string      `json:"format,omitempty"`
+	Items            *Items      `json:"items,omitempty"`
+	CollectionFormat string      `json:"collectionFormat,omitempty"`
+	Default          interface{} `json:"default,omitempty"`
+	Example          interface{} `json:"example,omitempty"`
+}
+
+// TypeName returns the type (or format) of a simple schema
+func (s *SimpleSchema) TypeName() string {
+	if s.Format != "" {
+		return s.Format
+	}
+	return s.Type
+}
+
+// ItemsTypeName yields the type of items in a simple schema array
+func (s *SimpleSchema) ItemsTypeName() string {
+	if s.Items == nil {
+		return ""
+	}
+	return s.Items.TypeName()
+}
+
+// CommonValidations describes common JSON-schema validations
+type CommonValidations struct {
+	Maximum          *float64      `json:"maximum,omitempty"`
+	ExclusiveMaximum bool          `json:"exclusiveMaximum,omitempty"`
+	Minimum          *float64      `json:"minimum,omitempty"`
+	ExclusiveMinimum bool          `json:"exclusiveMinimum,omitempty"`
+	MaxLength        *int64        `json:"maxLength,omitempty"`
+	MinLength        *int64        `json:"minLength,omitempty"`
+	Pattern          string        `json:"pattern,omitempty"`
+	MaxItems         *int64        `json:"maxItems,omitempty"`
+	MinItems         *int64        `json:"minItems,omitempty"`
+	UniqueItems      bool          `json:"uniqueItems,omitempty"`
+	MultipleOf       *float64      `json:"multipleOf,omitempty"`
+	Enum             []interface{} `json:"enum,omitempty"`
+}
+
+// Items a limited subset of JSON-Schema's items object.
+// It is used by parameter definitions that are not located in "body".
+//
+// For more information: http://goo.gl/8us55a#items-object
+type Items struct {
+	Refable
+	CommonValidations
+	SimpleSchema
+	VendorExtensible
+}
+
+// NewItems creates a new instance of items
+func NewItems() *Items {
+	return &Items{}
+}
+
+// Typed a fluent builder method for the type of item
+func (i *Items) Typed(tpe, format string) *Items {
+	i.Type = tpe
+	i.Format = format
+	return i
+}
+
+// AsNullable flags this schema as nullable.
+func (i *Items) AsNullable() *Items {
+	i.Nullable = true
+	return i
+}
+
+// CollectionOf a fluent builder method for an array item
+func (i *Items) CollectionOf(items *Items, format string) *Items {
+	i.Type = jsonArray
+	i.Items = items
+	i.CollectionFormat = format
+	return i
+}
+
+// WithDefault sets the default value on this item
+func (i *Items) WithDefault(defaultValue interface{}) *Items {
+	i.Default = defaultValue
+	return i
+}
+
+// WithMaxLength sets a max length value
+func (i *Items) WithMaxLength(max int64) *Items {
+	i.MaxLength = &max
+	return i
+}
+
+// WithMinLength sets a min length value
+func (i *Items) WithMinLength(min int64) *Items {
+	i.MinLength = &min
+	return i
+}
+
+// WithPattern sets a pattern value
+func (i *Items) WithPattern(pattern string) *Items {
+	i.Pattern = pattern
+	return i
+}
+
+// WithMultipleOf sets a multiple of value
+func (i *Items) WithMultipleOf(number float64) *Items {
+	i.MultipleOf = &number
+	return i
+}
+
+// WithMaximum sets a maximum number value
+func (i *Items) WithMaximum(max float64, exclusive bool) *Items {
+	i.Maximum = &max
+	i.ExclusiveMaximum = exclusive
+	return i
+}
+
+// WithMinimum sets a minimum number value
+func (i *Items) WithMinimum(min float64, exclusive bool) *Items {
+	i.Minimum = &min
+	i.ExclusiveMinimum = exclusive
+	return i
+}
+
+// WithEnum sets the enum values (replace)
+func (i *Items) WithEnum(values ...interface{}) *Items {
+	i.Enum = append([]interface{}{}, values...)
+	return i
+}
+
+// WithMaxItems sets the max items
+func (i *Items) WithMaxItems(size int64) *Items {
+	i.MaxItems = &size
+	return i
+}
+
+// WithMinItems sets the min items
+func (i *Items) WithMinItems(size int64) *Items {
+	i.MinItems = &size
+	return i
+}
+
+// UniqueValues dictates that this array can only have unique items
+func (i *Items) UniqueValues() *Items {
+	i.UniqueItems = true
+	return i
+}
+
+// AllowDuplicates this array can have duplicates
+func (i *Items) AllowDuplicates() *Items {
+	i.UniqueItems = false
+	return i
+}
+
+// UnmarshalJSON hydrates this items instance with the data from JSON
+func (i *Items) UnmarshalJSON(data []byte) error {
+	var validations CommonValidations
+	if err := json.Unmarshal(data, &validations); err != nil {
+		return err
+	}
+	var ref Refable
+	if err := json.Unmarshal(data, &ref); err != nil {
+		return err
+	}
+	var simpleSchema SimpleSchema
+	if err := json.Unmarshal(data, &simpleSchema); err != nil {
+		return err
+	}
+	var vendorExtensible VendorExtensible
+	if err := json.Unmarshal(data, &vendorExtensible); err != nil {
+		return err
+	}
+	i.Refable = ref
+	i.CommonValidations = validations
+	i.SimpleSchema = simpleSchema
+	i.VendorExtensible = vendorExtensible
+	return nil
+}
+
+// MarshalJSON converts this items object to JSON
+func (i Items) MarshalJSON() ([]byte, error) {
+	b1, err := json.Marshal(i.CommonValidations)
+	if err != nil {
+		return nil, err
+	}
+	b2, err := json.Marshal(i.SimpleSchema)
+	if err != nil {
+		return nil, err
+	}
+	b3, err := json.Marshal(i.Refable)
+	if err != nil {
+		return nil, err
+	}
+	b4, err := json.Marshal(i.VendorExtensible)
+	if err != nil {
+		return nil, err
+	}
+	return swag.ConcatJSON(b4, b3, b1, b2), nil
+}
+
+// JSONLookup look up a value by the json property name
+func (i Items) JSONLookup(token string) (interface{}, error) {
+	if token == jsonRef {
+		return &i.Ref, nil
+	}
+
+	r, _, err := jsonpointer.GetForToken(i.CommonValidations, token)
+	if err != nil && !strings.HasPrefix(err.Error(), "object has no field") {
+		return nil, err
+	}
+	if r != nil {
+		return r, nil
+	}
+	r, _, err = jsonpointer.GetForToken(i.SimpleSchema, token)
+	return r, err
+}
diff --git a/vendor/github.com/go-openapi/spec/license.go b/vendor/github.com/go-openapi/spec/license.go
new file mode 100644
index 00000000..f20961b4
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/license.go
@@ -0,0 +1,23 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+// License information for the exposed API.
+//
+// For more information: http://goo.gl/8us55a#licenseObject
+type License struct {
+	Name string `json:"name,omitempty"`
+	URL  string `json:"url,omitempty"`
+}
diff --git a/vendor/github.com/go-openapi/spec/normalizer.go b/vendor/github.com/go-openapi/spec/normalizer.go
new file mode 100644
index 00000000..b8957e7c
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/normalizer.go
@@ -0,0 +1,152 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+	"fmt"
+	"net/url"
+	"os"
+	"path"
+	"path/filepath"
+	"strings"
+)
+
+// normalizeAbsPath normalizes an absolute path for the cache.
+// On Windows, drive letters should be converted to lower case, just as net/url.URL lower-cases the scheme.
+func normalizeAbsPath(path string) string {
+	u, err := url.Parse(path)
+	if err != nil {
+		debugLog("normalize absolute path failed: %s", err)
+		return path
+	}
+	return u.String()
+}
+
+// base or refPath could be a file path or a URL
+// given a base absolute path and a ref path, return the absolute path of refPath
+// 1) if refPath is absolute, return it
+// 2) if refPath is relative, join it with basePath, keeping the scheme, host, and port if they exist
+// base could be a directory or a full file path
+func normalizePaths(refPath, base string) string {
+	refURL, _ := url.Parse(refPath)
+	if path.IsAbs(refURL.Path) || filepath.IsAbs(refPath) {
+		// refPath is actually absolute
+		if refURL.Host != "" {
+			return refPath
+		}
+		parts := strings.Split(refPath, "#")
+		result := filepath.FromSlash(parts[0])
+		if len(parts) == 2 {
+			result += "#" + parts[1]
+		}
+		return result
+	}
+
+	// relative refPath
+	baseURL, _ := url.Parse(base)
+	if !strings.HasPrefix(refPath, "#") {
+		// combining paths
+		if baseURL.Host != "" {
+			baseURL.Path = path.Join(path.Dir(baseURL.Path), refURL.Path)
+		} else { // base is a file
+			newBase := fmt.Sprintf("%s#%s", filepath.Join(filepath.Dir(base), filepath.FromSlash(refURL.Path)), refURL.Fragment)
+			return newBase
+		}
+
+	}
+	// copying fragment from ref to base
+	baseURL.Fragment = refURL.Fragment
+	return baseURL.String()
+}
+
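+// For illustration only (not part of the upstream file), on a Unix-like system
+// the rules above behave roughly as follows:
+//
+//    normalizePaths("item.json#/definitions/Pet", "/base/spec.json")
+//    // yields "/base/item.json#/definitions/Pet"
+//
+//    normalizePaths("#/definitions/Pet", "http://example.com/spec.json")
+//    // yields "http://example.com/spec.json#/definitions/Pet"
+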
+// denormalizeFileRef returns the simplest notation for a file $ref,
+// i.e. strips the absolute path and sets a path relative to the base path.
+//
+// This is currently used when we rewrite a ref after a circular ref has been detected.
+func denormalizeFileRef(ref *Ref, relativeBase, originalRelativeBase string) *Ref {
+	debugLog("denormalizeFileRef for: %s", ref.String())
+
+	if ref.String() == "" || ref.IsRoot() || ref.HasFragmentOnly {
+		return ref
+	}
+	// strip relativeBase from URI
+	relativeBaseURL, _ := url.Parse(relativeBase)
+	relativeBaseURL.Fragment = ""
+
+	if relativeBaseURL.IsAbs() && strings.HasPrefix(ref.String(), relativeBase) {
+		// this should work for absolute URI (e.g. http://...): we have an exact match, just trim prefix
+		r, _ := NewRef(strings.TrimPrefix(ref.String(), relativeBase))
+		return &r
+	}
+
+	if relativeBaseURL.IsAbs() {
+		// other absolute URL get unchanged (i.e. with a non-empty scheme)
+		return ref
+	}
+
+	// for relative file URIs:
+	originalRelativeBaseURL, _ := url.Parse(originalRelativeBase)
+	originalRelativeBaseURL.Fragment = ""
+	if strings.HasPrefix(ref.String(), originalRelativeBaseURL.String()) {
+		// the resulting ref is in the expanded spec: return a local ref
+		r, _ := NewRef(strings.TrimPrefix(ref.String(), originalRelativeBaseURL.String()))
+		return &r
+	}
+
+	// check if we may set a relative path, considering the original base path for this spec.
+	// Example:
+	//   spec is located at /mypath/spec.json
+	//   my normalized ref points to: /mypath/item.json#/target
+	//   expected result: item.json#/target
+	parts := strings.Split(ref.String(), "#")
+	relativePath, err := filepath.Rel(path.Dir(originalRelativeBaseURL.String()), parts[0])
+	if err != nil {
+		// there is no common ancestor (e.g. different drives on windows)
+		// leaves the ref unchanged
+		return ref
+	}
+	if len(parts) == 2 {
+		relativePath += "#" + parts[1]
+	}
+	r, _ := NewRef(relativePath)
+	return &r
+}
+
+// relativeBase could be an ABSOLUTE file path or an ABSOLUTE URL
+func normalizeFileRef(ref *Ref, relativeBase string) *Ref {
+	// This is important for when the reference is pointing to the root schema
+	if ref.String() == "" {
+		r, _ := NewRef(relativeBase)
+		return &r
+	}
+
+	debugLog("normalizing %s against %s", ref.String(), relativeBase)
+
+	s := normalizePaths(ref.String(), relativeBase)
+	r, _ := NewRef(s)
+	return &r
+}
+
+// absPath returns the absolute path of a file
+func absPath(fname string) (string, error) {
+	if strings.HasPrefix(fname, "http") {
+		return fname, nil
+	}
+	if filepath.IsAbs(fname) {
+		return fname, nil
+	}
+	wd, err := os.Getwd()
+	return filepath.Join(wd, fname), err
+}
diff --git a/vendor/github.com/go-openapi/spec/operation.go b/vendor/github.com/go-openapi/spec/operation.go
new file mode 100644
index 00000000..b1ebd599
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/operation.go
@@ -0,0 +1,398 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+	"bytes"
+	"encoding/gob"
+	"encoding/json"
+	"sort"
+
+	"github.com/go-openapi/jsonpointer"
+	"github.com/go-openapi/swag"
+)
+
+func init() {
+	//gob.Register(map[string][]interface{}{})
+	gob.Register(map[string]interface{}{})
+	gob.Register([]interface{}{})
+}
+
+// OperationProps describes an operation
+//
+// NOTES:
+// - schemes, when present must be from [http, https, ws, wss]: see validate
+// - Security is handled as a special case: see MarshalJSON function
+type OperationProps struct {
+	Description  string                 `json:"description,omitempty"`
+	Consumes     []string               `json:"consumes,omitempty"`
+	Produces     []string               `json:"produces,omitempty"`
+	Schemes      []string               `json:"schemes,omitempty"`
+	Tags         []string               `json:"tags,omitempty"`
+	Summary      string                 `json:"summary,omitempty"`
+	ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"`
+	ID           string                 `json:"operationId,omitempty"`
+	Deprecated   bool                   `json:"deprecated,omitempty"`
+	Security     []map[string][]string  `json:"security,omitempty"`
+	Parameters   []Parameter            `json:"parameters,omitempty"`
+	Responses    *Responses             `json:"responses,omitempty"`
+}
+
+// MarshalJSON takes care of serializing operation properties to JSON
+//
+// We use a custom marshaller here to handle a special case related to
+// the Security field: we need to preserve a zero-length slice
+// while omitting the field when the value is nil/unset.
+func (op OperationProps) MarshalJSON() ([]byte, error) {
+	type Alias OperationProps
+	if op.Security == nil {
+		return json.Marshal(&struct {
+			Security []map[string][]string `json:"security,omitempty"`
+			*Alias
+		}{
+			Security: op.Security,
+			Alias:    (*Alias)(&op),
+		})
+	}
+	return json.Marshal(&struct {
+		Security []map[string][]string `json:"security"`
+		*Alias
+	}{
+		Security: op.Security,
+		Alias:    (*Alias)(&op),
+	})
+}
+
+// Operation describes a single API operation on a path.
+//
+// For more information: http://goo.gl/8us55a#operationObject
+type Operation struct {
+	VendorExtensible
+	OperationProps
+}
+
+// SuccessResponse gets a success response model
+func (o *Operation) SuccessResponse() (*Response, int, bool) {
+	if o.Responses == nil {
+		return nil, 0, false
+	}
+
+	responseCodes := make([]int, 0, len(o.Responses.StatusCodeResponses))
+	for k := range o.Responses.StatusCodeResponses {
+		if k >= 200 && k < 300 {
+			responseCodes = append(responseCodes, k)
+		}
+	}
+	if len(responseCodes) > 0 {
+		sort.Ints(responseCodes)
+		v := o.Responses.StatusCodeResponses[responseCodes[0]]
+		return &v, responseCodes[0], true
+	}
+
+	return o.Responses.Default, 0, false
+}
+
+// JSONLookup looks up a value by the JSON property name
+func (o Operation) JSONLookup(token string) (interface{}, error) {
+	if ex, ok := o.Extensions[token]; ok {
+		return &ex, nil
+	}
+	r, _, err := jsonpointer.GetForToken(o.OperationProps, token)
+	return r, err
+}
+
+// UnmarshalJSON hydrates this operation instance with the data from JSON
+func (o *Operation) UnmarshalJSON(data []byte) error {
+	if err := json.Unmarshal(data, &o.OperationProps); err != nil {
+		return err
+	}
+	return json.Unmarshal(data, &o.VendorExtensible)
+}
+
+// MarshalJSON converts this operation object to JSON
+func (o Operation) MarshalJSON() ([]byte, error) {
+	b1, err := json.Marshal(o.OperationProps)
+	if err != nil {
+		return nil, err
+	}
+	b2, err := json.Marshal(o.VendorExtensible)
+	if err != nil {
+		return nil, err
+	}
+	concated := swag.ConcatJSON(b1, b2)
+	return concated, nil
+}
+
+// NewOperation creates a new operation instance.
+// It expects an ID as parameter but not passing an ID is also valid.
+func NewOperation(id string) *Operation {
+	op := new(Operation)
+	op.ID = id
+	return op
+}
+
+// WithID sets the ID property on this operation, allows for chaining.
+func (o *Operation) WithID(id string) *Operation {
+	o.ID = id
+	return o
+}
+
+// WithDescription sets the description on this operation, allows for chaining
+func (o *Operation) WithDescription(description string) *Operation {
+	o.Description = description
+	return o
+}
+
+// WithSummary sets the summary on this operation, allows for chaining
+func (o *Operation) WithSummary(summary string) *Operation {
+	o.Summary = summary
+	return o
+}
+
+// WithExternalDocs sets/removes the external docs for/from this operation.
+// When both params are empty strings, the external docs are removed.
+// When either value is a non-empty string, the values are set on the external docs object,
+// so when you pass a non-empty description you should also pass the URL, and vice versa.
+func (o *Operation) WithExternalDocs(description, url string) *Operation {
+	if description == "" && url == "" {
+		o.ExternalDocs = nil
+		return o
+	}
+
+	if o.ExternalDocs == nil {
+		o.ExternalDocs = &ExternalDocumentation{}
+	}
+	o.ExternalDocs.Description = description
+	o.ExternalDocs.URL = url
+	return o
+}
+
+// Deprecate marks the operation as deprecated
+func (o *Operation) Deprecate() *Operation {
+	o.Deprecated = true
+	return o
+}
+
+// Undeprecate marks the operation as not deprecated
+func (o *Operation) Undeprecate() *Operation {
+	o.Deprecated = false
+	return o
+}
+
+// WithConsumes adds media types for incoming body values
+func (o *Operation) WithConsumes(mediaTypes ...string) *Operation {
+	o.Consumes = append(o.Consumes, mediaTypes...)
+	return o
+}
+
+// WithProduces adds media types for outgoing body values
+func (o *Operation) WithProduces(mediaTypes ...string) *Operation {
+	o.Produces = append(o.Produces, mediaTypes...)
+	return o
+}
+
+// WithTags adds tags for this operation
+func (o *Operation) WithTags(tags ...string) *Operation {
+	o.Tags = append(o.Tags, tags...)
+	return o
+}
+
+// AddParam adds a parameter to this operation; when a parameter for that location
+// and with that name already exists, it is replaced
+func (o *Operation) AddParam(param *Parameter) *Operation {
+	if param == nil {
+		return o
+	}
+
+	for i, p := range o.Parameters {
+		if p.Name == param.Name && p.In == param.In {
+			params := append(o.Parameters[:i], *param)
+			params = append(params, o.Parameters[i+1:]...)
+			o.Parameters = params
+			return o
+		}
+	}
+
+	o.Parameters = append(o.Parameters, *param)
+	return o
+}
+
+// RemoveParam removes a parameter from the operation
+func (o *Operation) RemoveParam(name, in string) *Operation {
+	for i, p := range o.Parameters {
+		if p.Name == name && p.In == in {
+			o.Parameters = append(o.Parameters[:i], o.Parameters[i+1:]...)
+			return o
+		}
+	}
+	return o
+}
+
+// SecuredWith adds a security scope to this operation.
+func (o *Operation) SecuredWith(name string, scopes ...string) *Operation {
+	o.Security = append(o.Security, map[string][]string{name: scopes})
+	return o
+}
+
+// WithDefaultResponse adds a default response to the operation.
+// Passing a nil value will remove the response
+func (o *Operation) WithDefaultResponse(response *Response) *Operation {
+	return o.RespondsWith(0, response)
+}
+
+// RespondsWith adds a status code response to the operation.
+// When the code is 0 the value of the response will be used as default response value.
+// When the value of the response is nil it will be removed from the operation
+func (o *Operation) RespondsWith(code int, response *Response) *Operation {
+	if o.Responses == nil {
+		o.Responses = new(Responses)
+	}
+	if code == 0 {
+		o.Responses.Default = response
+		return o
+	}
+	if response == nil {
+		delete(o.Responses.StatusCodeResponses, code)
+		return o
+	}
+	if o.Responses.StatusCodeResponses == nil {
+		o.Responses.StatusCodeResponses = make(map[int]Response)
+	}
+	o.Responses.StatusCodeResponses[code] = *response
+	return o
+}
+
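+// A minimal, illustrative sketch (not part of the upstream file) of how the
+// fluent Operation builders above compose; the operation ID, media type, and
+// response text are placeholders:
+//
+//    op := NewOperation("listPets").
+//        WithSummary("List pets").
+//        WithProduces("application/json").
+//        RespondsWith(200, NewResponse().WithDescription("OK"))
+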
+type opsAlias OperationProps
+
+type gobAlias struct {
+	Security []map[string]struct {
+		List []string
+		Pad  bool
+	}
+	Alias           *opsAlias
+	SecurityIsEmpty bool
+}
+
+// GobEncode provides a safe gob encoder for Operation, including empty security requirements
+func (o Operation) GobEncode() ([]byte, error) {
+	raw := struct {
+		Ext   VendorExtensible
+		Props OperationProps
+	}{
+		Ext:   o.VendorExtensible,
+		Props: o.OperationProps,
+	}
+	var b bytes.Buffer
+	err := gob.NewEncoder(&b).Encode(raw)
+	return b.Bytes(), err
+}
+
+// GobDecode provides a safe gob decoder for Operation, including empty security requirements
+func (o *Operation) GobDecode(b []byte) error {
+	var raw struct {
+		Ext   VendorExtensible
+		Props OperationProps
+	}
+
+	buf := bytes.NewBuffer(b)
+	err := gob.NewDecoder(buf).Decode(&raw)
+	if err != nil {
+		return err
+	}
+	o.VendorExtensible = raw.Ext
+	o.OperationProps = raw.Props
+	return nil
+}
+
+// GobEncode provides a safe gob encoder for OperationProps, including empty security requirements
+func (op OperationProps) GobEncode() ([]byte, error) {
+	raw := gobAlias{
+		Alias: (*opsAlias)(&op),
+	}
+
+	var b bytes.Buffer
+	if op.Security == nil {
+		// nil security requirement
+		err := gob.NewEncoder(&b).Encode(raw)
+		return b.Bytes(), err
+	}
+
+	if len(op.Security) == 0 {
+		// empty, but non-nil security requirement
+		raw.SecurityIsEmpty = true
+		raw.Alias.Security = nil
+		err := gob.NewEncoder(&b).Encode(raw)
+		return b.Bytes(), err
+	}
+
+	raw.Security = make([]map[string]struct {
+		List []string
+		Pad  bool
+	}, 0, len(op.Security))
+	for _, req := range op.Security {
+		v := make(map[string]struct {
+			List []string
+			Pad  bool
+		}, len(req))
+		for k, val := range req {
+			v[k] = struct {
+				List []string
+				Pad  bool
+			}{
+				List: val,
+			}
+		}
+		raw.Security = append(raw.Security, v)
+	}
+
+	err := gob.NewEncoder(&b).Encode(raw)
+	return b.Bytes(), err
+}
+
+// GobDecode provides a safe gob decoder for OperationProps, including empty security requirements
+func (op *OperationProps) GobDecode(b []byte) error {
+	var raw gobAlias
+
+	buf := bytes.NewBuffer(b)
+	err := gob.NewDecoder(buf).Decode(&raw)
+	if err != nil {
+		return err
+	}
+	if raw.Alias == nil {
+		return nil
+	}
+
+	switch {
+	case raw.SecurityIsEmpty:
+		// empty, but non-nil security requirement
+		raw.Alias.Security = []map[string][]string{}
+	case len(raw.Alias.Security) == 0:
+		// nil security requirement
+		raw.Alias.Security = nil
+	default:
+		raw.Alias.Security = make([]map[string][]string, 0, len(raw.Security))
+		for _, req := range raw.Security {
+			v := make(map[string][]string, len(req))
+			for k, val := range req {
+				v[k] = make([]string, 0, len(val.List))
+				v[k] = append(v[k], val.List...)
+			}
+			raw.Alias.Security = append(raw.Alias.Security, v)
+		}
+	}
+
+	*op = *(*OperationProps)(raw.Alias)
+	return nil
+}
diff --git a/vendor/github.com/go-openapi/spec/parameter.go b/vendor/github.com/go-openapi/spec/parameter.go
new file mode 100644
index 00000000..cecdff54
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/parameter.go
@@ -0,0 +1,321 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+	"encoding/json"
+	"strings"
+
+	"github.com/go-openapi/jsonpointer"
+	"github.com/go-openapi/swag"
+)
+
+// QueryParam creates a query parameter
+func QueryParam(name string) *Parameter {
+	return &Parameter{ParamProps: ParamProps{Name: name, In: "query"}}
+}
+
+// HeaderParam creates a header parameter; it is required by default
+func HeaderParam(name string) *Parameter {
+	return &Parameter{ParamProps: ParamProps{Name: name, In: "header", Required: true}}
+}
+
+// PathParam creates a path parameter; it is always required
+func PathParam(name string) *Parameter {
+	return &Parameter{ParamProps: ParamProps{Name: name, In: "path", Required: true}}
+}
+
+// BodyParam creates a body parameter
+func BodyParam(name string, schema *Schema) *Parameter {
+	return &Parameter{ParamProps: ParamProps{Name: name, In: "body", Schema: schema},
+		SimpleSchema: SimpleSchema{Type: "object"}}
+}
+
+// FormDataParam creates a form data parameter
+func FormDataParam(name string) *Parameter {
+	return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}}
+}
+
+// FileParam creates a file parameter (a formData parameter with type "file")
+func FileParam(name string) *Parameter {
+	return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"},
+		SimpleSchema: SimpleSchema{Type: "file"}}
+}
+
+// SimpleArrayParam creates a param for a simple array (string, int, date etc)
+func SimpleArrayParam(name, tpe, fmt string) *Parameter {
+	return &Parameter{ParamProps: ParamProps{Name: name},
+		SimpleSchema: SimpleSchema{Type: jsonArray, CollectionFormat: "csv",
+			Items: &Items{SimpleSchema: SimpleSchema{Type: "string", Format: fmt}}}}
+}
+
+// ParamRef creates a parameter that's a json reference
+func ParamRef(uri string) *Parameter {
+	p := new(Parameter)
+	p.Ref = MustCreateRef(uri)
+	return p
+}
+
+// ParamProps describes the specific attributes of an operation parameter
+//
+// NOTE:
+// - Schema is defined when "in" == "body": see validate
+// - AllowEmptyValue is allowed where "in" == "query" || "formData"
+type ParamProps struct {
+	Description     string  `json:"description,omitempty"`
+	Name            string  `json:"name,omitempty"`
+	In              string  `json:"in,omitempty"`
+	Required        bool    `json:"required,omitempty"`
+	Schema          *Schema `json:"schema,omitempty"`
+	AllowEmptyValue bool    `json:"allowEmptyValue,omitempty"`
+}
+
+// Parameter a unique parameter is defined by a combination of a [name](#parameterName) and [location](#parameterIn).
+//
+// There are five possible parameter types.
+// * Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part
+//   of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`,
+//   the path parameter is `itemId`.
+// * Query - Parameters that are appended to the URL. For example, in `/items?id=###`, the query parameter is `id`.
+// * Header - Custom headers that are expected as part of the request.
+// * Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be
+//   _one_ body parameter. The name of the body parameter has no effect on the parameter itself and is used for
+//   documentation purposes only. Since Form parameters are also in the payload, body and form parameters cannot exist
+//   together for the same operation.
+// * Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or
+//   `multipart/form-data` are used as the content type of the request (in Swagger's definition,
+//   the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used
+//   to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be
+//   declared together with a body parameter for the same operation. Form parameters have a different format based on
+//   the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4).
+//   * `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload.
+//   For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple
+//   parameters that are being transferred.
+//   * `multipart/form-data` - each parameter takes a section in the payload with an internal header.
+//   For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is
+//   `submit-name`. This type of form parameters is more commonly used for file transfers.
+//
+// For more information: http://goo.gl/8us55a#parameterObject
+type Parameter struct {
+	Refable
+	CommonValidations
+	SimpleSchema
+	VendorExtensible
+	ParamProps
+}
+
+// JSONLookup looks up a value by the JSON property name
+func (p Parameter) JSONLookup(token string) (interface{}, error) {
+	if ex, ok := p.Extensions[token]; ok {
+		return &ex, nil
+	}
+	if token == jsonRef {
+		return &p.Ref, nil
+	}
+
+	r, _, err := jsonpointer.GetForToken(p.CommonValidations, token)
+	if err != nil && !strings.HasPrefix(err.Error(), "object has no field") {
+		return nil, err
+	}
+	if r != nil {
+		return r, nil
+	}
+	r, _, err = jsonpointer.GetForToken(p.SimpleSchema, token)
+	if err != nil && !strings.HasPrefix(err.Error(), "object has no field") {
+		return nil, err
+	}
+	if r != nil {
+		return r, nil
+	}
+	r, _, err = jsonpointer.GetForToken(p.ParamProps, token)
+	return r, err
+}
+
+// WithDescription a fluent builder method for the description of the parameter
+func (p *Parameter) WithDescription(description string) *Parameter {
+	p.Description = description
+	return p
+}
+
+// Named a fluent builder method to override the name of the parameter
+func (p *Parameter) Named(name string) *Parameter {
+	p.Name = name
+	return p
+}
+
+// WithLocation a fluent builder method to override the location of the parameter
+func (p *Parameter) WithLocation(in string) *Parameter {
+	p.In = in
+	return p
+}
+
+// Typed a fluent builder method for the type of the parameter value
+func (p *Parameter) Typed(tpe, format string) *Parameter {
+	p.Type = tpe
+	p.Format = format
+	return p
+}
+
+// CollectionOf a fluent builder method for an array parameter
+func (p *Parameter) CollectionOf(items *Items, format string) *Parameter {
+	p.Type = jsonArray
+	p.Items = items
+	p.CollectionFormat = format
+	return p
+}
+
+// WithDefault sets the default value on this parameter
+func (p *Parameter) WithDefault(defaultValue interface{}) *Parameter {
+	p.AsOptional() // with default implies optional
+	p.Default = defaultValue
+	return p
+}
+
+// AllowsEmptyValues flags this parameter as being ok with empty values
+func (p *Parameter) AllowsEmptyValues() *Parameter {
+	p.AllowEmptyValue = true
+	return p
+}
+
+// NoEmptyValues flags this parameter as not allowing empty values
+func (p *Parameter) NoEmptyValues() *Parameter {
+	p.AllowEmptyValue = false
+	return p
+}
+
+// AsOptional flags this parameter as optional
+func (p *Parameter) AsOptional() *Parameter {
+	p.Required = false
+	return p
+}
+
+// AsRequired flags this parameter as required
+func (p *Parameter) AsRequired() *Parameter {
+	if p.Default != nil { // with a default required makes no sense
+		return p
+	}
+	p.Required = true
+	return p
+}
+
+// WithMaxLength sets a max length value
+func (p *Parameter) WithMaxLength(max int64) *Parameter {
+	p.MaxLength = &max
+	return p
+}
+
+// WithMinLength sets a min length value
+func (p *Parameter) WithMinLength(min int64) *Parameter {
+	p.MinLength = &min
+	return p
+}
+
+// WithPattern sets a pattern value
+func (p *Parameter) WithPattern(pattern string) *Parameter {
+	p.Pattern = pattern
+	return p
+}
+
+// WithMultipleOf sets a multiple of value
+func (p *Parameter) WithMultipleOf(number float64) *Parameter {
+	p.MultipleOf = &number
+	return p
+}
+
+// WithMaximum sets a maximum number value
+func (p *Parameter) WithMaximum(max float64, exclusive bool) *Parameter {
+	p.Maximum = &max
+	p.ExclusiveMaximum = exclusive
+	return p
+}
+
+// WithMinimum sets a minimum number value
+func (p *Parameter) WithMinimum(min float64, exclusive bool) *Parameter {
+	p.Minimum = &min
+	p.ExclusiveMinimum = exclusive
+	return p
+}
+
+// WithEnum sets the enum values (replacing any previous values)
+func (p *Parameter) WithEnum(values ...interface{}) *Parameter {
+	p.Enum = append([]interface{}{}, values...)
+	return p
+}
+
+// WithMaxItems sets the max items
+func (p *Parameter) WithMaxItems(size int64) *Parameter {
+	p.MaxItems = &size
+	return p
+}
+
+// WithMinItems sets the min items
+func (p *Parameter) WithMinItems(size int64) *Parameter {
+	p.MinItems = &size
+	return p
+}
+
+// UniqueValues dictates that this array can only have unique items
+func (p *Parameter) UniqueValues() *Parameter {
+	p.UniqueItems = true
+	return p
+}
+
+// AllowDuplicates dictates that this array can have duplicate items
+func (p *Parameter) AllowDuplicates() *Parameter {
+	p.UniqueItems = false
+	return p
+}
+
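+// Illustrative only (not part of the upstream file): the constructors and
+// builders above chain in the same way as the other spec types; the name,
+// type, and default below are placeholders:
+//
+//    limit := QueryParam("limit").
+//        Typed("integer", "int32").
+//        WithDefault(20)
+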
+// UnmarshalJSON hydrates this parameter instance with the data from JSON
+func (p *Parameter) UnmarshalJSON(data []byte) error {
+	if err := json.Unmarshal(data, &p.CommonValidations); err != nil {
+		return err
+	}
+	if err := json.Unmarshal(data, &p.Refable); err != nil {
+		return err
+	}
+	if err := json.Unmarshal(data, &p.SimpleSchema); err != nil {
+		return err
+	}
+	if err := json.Unmarshal(data, &p.VendorExtensible); err != nil {
+		return err
+	}
+	return json.Unmarshal(data, &p.ParamProps)
+}
+
+// MarshalJSON converts this parameter object to JSON
+func (p Parameter) MarshalJSON() ([]byte, error) {
+	b1, err := json.Marshal(p.CommonValidations)
+	if err != nil {
+		return nil, err
+	}
+	b2, err := json.Marshal(p.SimpleSchema)
+	if err != nil {
+		return nil, err
+	}
+	b3, err := json.Marshal(p.Refable)
+	if err != nil {
+		return nil, err
+	}
+	b4, err := json.Marshal(p.VendorExtensible)
+	if err != nil {
+		return nil, err
+	}
+	b5, err := json.Marshal(p.ParamProps)
+	if err != nil {
+		return nil, err
+	}
+	return swag.ConcatJSON(b3, b1, b2, b4, b5), nil
+}
diff --git a/vendor/github.com/go-openapi/spec/path_item.go b/vendor/github.com/go-openapi/spec/path_item.go
new file mode 100644
index 00000000..68fc8e90
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/path_item.go
@@ -0,0 +1,87 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+	"encoding/json"
+
+	"github.com/go-openapi/jsonpointer"
+	"github.com/go-openapi/swag"
+)
+
+// PathItemProps the path item specific properties
+type PathItemProps struct {
+	Get        *Operation  `json:"get,omitempty"`
+	Put        *Operation  `json:"put,omitempty"`
+	Post       *Operation  `json:"post,omitempty"`
+	Delete     *Operation  `json:"delete,omitempty"`
+	Options    *Operation  `json:"options,omitempty"`
+	Head       *Operation  `json:"head,omitempty"`
+	Patch      *Operation  `json:"patch,omitempty"`
+	Parameters []Parameter `json:"parameters,omitempty"`
+}
+
+// PathItem describes the operations available on a single path.
+// A Path Item may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering).
+// The path itself is still exposed to the documentation viewer, but viewers will
+// not know which operations and parameters are available.
+//
+// For more information: http://goo.gl/8us55a#pathItemObject
+type PathItem struct {
+	Refable
+	VendorExtensible
+	PathItemProps
+}
+
+// JSONLookup looks up a value by the JSON property name
+func (p PathItem) JSONLookup(token string) (interface{}, error) {
+	if ex, ok := p.Extensions[token]; ok {
+		return &ex, nil
+	}
+	if token == jsonRef {
+		return &p.Ref, nil
+	}
+	r, _, err := jsonpointer.GetForToken(p.PathItemProps, token)
+	return r, err
+}
+
+// UnmarshalJSON hydrates this path item instance with the data from JSON
+func (p *PathItem) UnmarshalJSON(data []byte) error {
+	if err := json.Unmarshal(data, &p.Refable); err != nil {
+		return err
+	}
+	if err := json.Unmarshal(data, &p.VendorExtensible); err != nil {
+		return err
+	}
+	return json.Unmarshal(data, &p.PathItemProps)
+}
+
+// MarshalJSON converts this path item object to JSON
+func (p PathItem) MarshalJSON() ([]byte, error) {
+	b3, err := json.Marshal(p.Refable)
+	if err != nil {
+		return nil, err
+	}
+	b4, err := json.Marshal(p.VendorExtensible)
+	if err != nil {
+		return nil, err
+	}
+	b5, err := json.Marshal(p.PathItemProps)
+	if err != nil {
+		return nil, err
+	}
+	concated := swag.ConcatJSON(b3, b4, b5)
+	return concated, nil
+}
diff --git a/vendor/github.com/go-openapi/spec/paths.go b/vendor/github.com/go-openapi/spec/paths.go
new file mode 100644
index 00000000..9dc82a29
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/paths.go
@@ -0,0 +1,97 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+
+	"github.com/go-openapi/swag"
+)
+
+// Paths holds the relative paths to the individual endpoints.
+// The path is appended to the [`basePath`](http://goo.gl/8us55a#swaggerBasePath) in order
+// to construct the full URL.
+// The Paths may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering).
+//
+// For more information: http://goo.gl/8us55a#pathsObject
+type Paths struct {
+	VendorExtensible
+	Paths map[string]PathItem `json:"-"` // custom serializer to flatten this, each entry must start with "/"
+}
+
+// JSONLookup looks up a value by the JSON property name
+func (p Paths) JSONLookup(token string) (interface{}, error) {
+	if pi, ok := p.Paths[token]; ok {
+		return &pi, nil
+	}
+	if ex, ok := p.Extensions[token]; ok {
+		return &ex, nil
+	}
+	return nil, fmt.Errorf("object has no field %q", token)
+}
+
+// UnmarshalJSON hydrates this paths instance with the data from JSON
+func (p *Paths) UnmarshalJSON(data []byte) error {
+	var res map[string]json.RawMessage
+	if err := json.Unmarshal(data, &res); err != nil {
+		return err
+	}
+	for k, v := range res {
+		if strings.HasPrefix(strings.ToLower(k), "x-") {
+			if p.Extensions == nil {
+				p.Extensions = make(map[string]interface{})
+			}
+			var d interface{}
+			if err := json.Unmarshal(v, &d); err != nil {
+				return err
+			}
+			p.Extensions[k] = d
+		}
+		if strings.HasPrefix(k, "/") {
+			if p.Paths == nil {
+				p.Paths = make(map[string]PathItem)
+			}
+			var pi PathItem
+			if err := json.Unmarshal(v, &pi); err != nil {
+				return err
+			}
+			p.Paths[k] = pi
+		}
+	}
+	return nil
+}
+
+// MarshalJSON converts this paths object to JSON
+func (p Paths) MarshalJSON() ([]byte, error) {
+	b1, err := json.Marshal(p.VendorExtensible)
+	if err != nil {
+		return nil, err
+	}
+
+	pths := make(map[string]PathItem)
+	for k, v := range p.Paths {
+		if strings.HasPrefix(k, "/") {
+			pths[k] = v
+		}
+	}
+	b2, err := json.Marshal(pths)
+	if err != nil {
+		return nil, err
+	}
+	concated := swag.ConcatJSON(b1, b2)
+	return concated, nil
+}
diff --git a/vendor/github.com/go-openapi/spec/ref.go b/vendor/github.com/go-openapi/spec/ref.go
new file mode 100644
index 00000000..08ff869b
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/ref.go
@@ -0,0 +1,191 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+	"bytes"
+	"encoding/gob"
+	"encoding/json"
+	"net/http"
+	"os"
+	"path/filepath"
+
+	"github.com/go-openapi/jsonreference"
+)
+
+// Refable is a struct for things that accept a $ref property
+type Refable struct {
+	Ref Ref
+}
+
+// MarshalJSON marshals the ref to json
+func (r Refable) MarshalJSON() ([]byte, error) {
+	return r.Ref.MarshalJSON()
+}
+
+// UnmarshalJSON unmarshals the ref from JSON
+func (r *Refable) UnmarshalJSON(d []byte) error {
+	return json.Unmarshal(d, &r.Ref)
+}
+
+// Ref represents a json reference that is potentially resolved
+type Ref struct {
+	jsonreference.Ref
+}
+
+// RemoteURI gets the remote uri part of the ref
+func (r *Ref) RemoteURI() string {
+	if r.String() == "" {
+		return r.String()
+	}
+
+	u := *r.GetURL()
+	u.Fragment = ""
+	return u.String()
+}
+
+// IsValidURI returns true when the url the ref points to can be found
+func (r *Ref) IsValidURI(basepaths ...string) bool {
+	if r.String() == "" {
+		return true
+	}
+
+	v := r.RemoteURI()
+	if v == "" {
+		return true
+	}
+
+	if r.HasFullURL {
+		rr, err := http.Get(v)
+		if err != nil {
+			return false
+		}
+
+		return rr.StatusCode/100 == 2
+	}
+
+	if !(r.HasFileScheme || r.HasFullFilePath || r.HasURLPathOnly) {
+		return false
+	}
+
+	// check for local file
+	pth := v
+	if r.HasURLPathOnly {
+		base := "."
+		if len(basepaths) > 0 {
+			base = filepath.Dir(filepath.Join(basepaths...))
+		}
+		p, e := filepath.Abs(filepath.ToSlash(filepath.Join(base, pth)))
+		if e != nil {
+			return false
+		}
+		pth = p
+	}
+
+	fi, err := os.Stat(filepath.ToSlash(pth))
+	if err != nil {
+		return false
+	}
+
+	return !fi.IsDir()
+}
+
+// Inherits creates a new reference from a parent and a child
+// If the child cannot inherit from the parent, an error is returned
+func (r *Ref) Inherits(child Ref) (*Ref, error) {
+	ref, err := r.Ref.Inherits(child.Ref)
+	if err != nil {
+		return nil, err
+	}
+	return &Ref{Ref: *ref}, nil
+}
+
+// NewRef creates a new instance of a ref object
+// returns an error when the reference uri is an invalid uri
+func NewRef(refURI string) (Ref, error) {
+	ref, err := jsonreference.New(refURI)
+	if err != nil {
+		return Ref{}, err
+	}
+	return Ref{Ref: ref}, nil
+}
+
+// MustCreateRef creates a ref object but panics when refURI is invalid.
+// Use the NewRef method for a version that returns an error.
+func MustCreateRef(refURI string) Ref {
+	return Ref{Ref: jsonreference.MustCreateRef(refURI)}
+}
+
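+// For illustration only (not part of the upstream file): NewRef is the
+// error-returning form and MustCreateRef the panicking one, e.g.
+//
+//    r, err := NewRef("#/definitions/Pet")
+//    if err != nil { ... }
+//    same := MustCreateRef("#/definitions/Pet")
+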
+// MarshalJSON marshals this ref into a JSON object
+func (r Ref) MarshalJSON() ([]byte, error) {
+	str := r.String()
+	if str == "" {
+		if r.IsRoot() {
+			return []byte(`{"$ref":""}`), nil
+		}
+		return []byte("{}"), nil
+	}
+	v := map[string]interface{}{"$ref": str}
+	return json.Marshal(v)
+}
+
+// UnmarshalJSON unmarshals this ref from a JSON object
+func (r *Ref) UnmarshalJSON(d []byte) error {
+	var v map[string]interface{}
+	if err := json.Unmarshal(d, &v); err != nil {
+		return err
+	}
+	return r.fromMap(v)
+}
+
+// GobEncode provides a safe gob encoder for Ref
+func (r Ref) GobEncode() ([]byte, error) {
+	var b bytes.Buffer
+	raw, err := r.MarshalJSON()
+	if err != nil {
+		return nil, err
+	}
+	err = gob.NewEncoder(&b).Encode(raw)
+	return b.Bytes(), err
+}
+
+// GobDecode provides a safe gob decoder for Ref
+func (r *Ref) GobDecode(b []byte) error {
+	var raw []byte
+	buf := bytes.NewBuffer(b)
+	err := gob.NewDecoder(buf).Decode(&raw)
+	if err != nil {
+		return err
+	}
+	return json.Unmarshal(raw, r)
+}
+
+func (r *Ref) fromMap(v map[string]interface{}) error {
+	if v == nil {
+		return nil
+	}
+
+	if vv, ok := v["$ref"]; ok {
+		if str, ok := vv.(string); ok {
+			ref, err := jsonreference.New(str)
+			if err != nil {
+				return err
+			}
+			*r = Ref{Ref: ref}
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/go-openapi/spec/response.go b/vendor/github.com/go-openapi/spec/response.go
new file mode 100644
index 00000000..27729c1d
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/response.go
@@ -0,0 +1,131 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+	"encoding/json"
+
+	"github.com/go-openapi/jsonpointer"
+	"github.com/go-openapi/swag"
+)
+
+// ResponseProps properties specific to a response
+type ResponseProps struct {
+	Description string                 `json:"description,omitempty"`
+	Schema      *Schema                `json:"schema,omitempty"`
+	Headers     map[string]Header      `json:"headers,omitempty"`
+	Examples    map[string]interface{} `json:"examples,omitempty"`
+}
+
+// Response describes a single response from an API Operation.
+//
+// For more information: http://goo.gl/8us55a#responseObject
+type Response struct {
+	Refable
+	ResponseProps
+	VendorExtensible
+}
+
+// JSONLookup looks up a value by the JSON property name
+func (r Response) JSONLookup(token string) (interface{}, error) {
+	if ex, ok := r.Extensions[token]; ok {
+		return &ex, nil
+	}
+	if token == "$ref" {
+		return &r.Ref, nil
+	}
+	ptr, _, err := jsonpointer.GetForToken(r.ResponseProps, token)
+	return ptr, err
+}
+
+// UnmarshalJSON hydrates this response instance with the data from JSON
+func (r *Response) UnmarshalJSON(data []byte) error {
+	if err := json.Unmarshal(data, &r.ResponseProps); err != nil {
+		return err
+	}
+	if err := json.Unmarshal(data, &r.Refable); err != nil {
+		return err
+	}
+	return json.Unmarshal(data, &r.VendorExtensible)
+}
+
+// MarshalJSON converts this response object to JSON
+func (r Response) MarshalJSON() ([]byte, error) {
+	b1, err := json.Marshal(r.ResponseProps)
+	if err != nil {
+		return nil, err
+	}
+	b2, err := json.Marshal(r.Refable)
+	if err != nil {
+		return nil, err
+	}
+	b3, err := json.Marshal(r.VendorExtensible)
+	if err != nil {
+		return nil, err
+	}
+	return swag.ConcatJSON(b1, b2, b3), nil
+}
+
+// NewResponse creates a new response instance
+func NewResponse() *Response {
+	return new(Response)
+}
+
+// ResponseRef creates a response as a json reference
+func ResponseRef(url string) *Response {
+	resp := NewResponse()
+	resp.Ref = MustCreateRef(url)
+	return resp
+}
+
+// WithDescription sets the description on this response, allows for chaining
+func (r *Response) WithDescription(description string) *Response {
+	r.Description = description
+	return r
+}
+
+// WithSchema sets the schema on this response, allows for chaining.
+// Passing a nil argument removes the schema from this response
+func (r *Response) WithSchema(schema *Schema) *Response {
+	r.Schema = schema
+	return r
+}
+
+// AddHeader adds a header to this response
+func (r *Response) AddHeader(name string, header *Header) *Response {
+	if header == nil {
+		return r.RemoveHeader(name)
+	}
+	if r.Headers == nil {
+		r.Headers = make(map[string]Header)
+	}
+	r.Headers[name] = *header
+	return r
+}
+
+// RemoveHeader removes a header from this response
+func (r *Response) RemoveHeader(name string) *Response {
+	delete(r.Headers, name)
+	return r
+}
+
+// AddExample adds an example to this response
+func (r *Response) AddExample(mediaType string, example interface{}) *Response {
+	if r.Examples == nil {
+		r.Examples = make(map[string]interface{})
+	}
+	r.Examples[mediaType] = example
+	return r
+}
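+
+// Putting the builders above together (illustrative only, not part of the
+// upstream file; description, schema, and example values are placeholders):
+//
+//    notFound := NewResponse().
+//        WithDescription("pet not found").
+//        WithSchema(StringProperty()).
+//        AddExample("application/json", "no such pet")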
diff --git a/vendor/github.com/go-openapi/spec/responses.go b/vendor/github.com/go-openapi/spec/responses.go
new file mode 100644
index 00000000..4efb6f86
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/responses.go
@@ -0,0 +1,127 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"strconv"
+
+	"github.com/go-openapi/swag"
+)
+
+// Responses is a container for the expected responses of an operation.
+// The container maps an HTTP response code to the expected response.
+// The documentation is not necessarily expected to cover all possible HTTP response codes,
+// since they may not be known in advance. However, it is expected to cover
+// a successful operation response and any known errors.
+//
+// The `default` can be used as a default response object for all HTTP codes that are not covered
+// individually by the specification.
+//
+// The `Responses Object` MUST contain at least one response code, and it SHOULD be the response
+// for a successful operation call.
+//
+// For more information: http://goo.gl/8us55a#responsesObject
+type Responses struct {
+	VendorExtensible
+	ResponsesProps
+}
+
+// JSONLookup implements an interface to customize json pointer lookup
+func (r Responses) JSONLookup(token string) (interface{}, error) {
+	if token == "default" {
+		return r.Default, nil
+	}
+	if ex, ok := r.Extensions[token]; ok {
+		return &ex, nil
+	}
+	if i, err := strconv.Atoi(token); err == nil {
+		if scr, ok := r.StatusCodeResponses[i]; ok {
+			return scr, nil
+		}
+	}
+	return nil, fmt.Errorf("object has no field %q", token)
+}
+
+// UnmarshalJSON hydrates this responses instance with the data from JSON
+func (r *Responses) UnmarshalJSON(data []byte) error {
+	if err := json.Unmarshal(data, &r.ResponsesProps); err != nil {
+		return err
+	}
+	if err := json.Unmarshal(data, &r.VendorExtensible); err != nil {
+		return err
+	}
+	if reflect.DeepEqual(ResponsesProps{}, r.ResponsesProps) {
+		r.ResponsesProps = ResponsesProps{}
+	}
+	return nil
+}
+
+// MarshalJSON converts this responses object to JSON
+func (r Responses) MarshalJSON() ([]byte, error) {
+	b1, err := json.Marshal(r.ResponsesProps)
+	if err != nil {
+		return nil, err
+	}
+	b2, err := json.Marshal(r.VendorExtensible)
+	if err != nil {
+		return nil, err
+	}
+	concated := swag.ConcatJSON(b1, b2)
+	return concated, nil
+}
+
+// ResponsesProps describes all responses for an operation.
+// It holds the default response and maps each HTTP status code to its
+// response.
+type ResponsesProps struct {
+	Default             *Response
+	StatusCodeResponses map[int]Response
+}
+
+// MarshalJSON marshals responses as JSON
+func (r ResponsesProps) MarshalJSON() ([]byte, error) {
+	toser := map[string]Response{}
+	if r.Default != nil {
+		toser["default"] = *r.Default
+	}
+	for k, v := range r.StatusCodeResponses {
+		toser[strconv.Itoa(k)] = v
+	}
+	return json.Marshal(toser)
+}
+
+// UnmarshalJSON unmarshals responses from JSON
+func (r *ResponsesProps) UnmarshalJSON(data []byte) error {
+	var res map[string]Response
+	if err := json.Unmarshal(data, &res); err != nil {
+		return nil
+	}
+	if v, ok := res["default"]; ok {
+		r.Default = &v
+		delete(res, "default")
+	}
+	for k, v := range res {
+		if nk, err := strconv.Atoi(k); err == nil {
+			if r.StatusCodeResponses == nil {
+				r.StatusCodeResponses = map[int]Response{}
+			}
+			r.StatusCodeResponses[nk] = v
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/go-openapi/spec/schema.go b/vendor/github.com/go-openapi/spec/schema.go
new file mode 100644
index 00000000..37858ece
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/schema.go
@@ -0,0 +1,596 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"strings"
+
+	"github.com/go-openapi/jsonpointer"
+	"github.com/go-openapi/swag"
+)
+
+// BooleanProperty creates a boolean property
+func BooleanProperty() *Schema {
+	return &Schema{SchemaProps: SchemaProps{Type: []string{"boolean"}}}
+}
+
+// BoolProperty creates a boolean property
+func BoolProperty() *Schema { return BooleanProperty() }
+
+// StringProperty creates a string property
+func StringProperty() *Schema {
+	return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}}
+}
+
+// CharProperty creates a string property
+func CharProperty() *Schema {
+	return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}}
+}
+
+// Float64Property creates a float64/double property
+func Float64Property() *Schema {
+	return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "double"}}
+}
+
+// Float32Property creates a float32/float property
+func Float32Property() *Schema {
+	return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "float"}}
+}
+
+// Int8Property creates an int8 property
+func Int8Property() *Schema {
+	return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int8"}}
+}
+
+// Int16Property creates an int16 property
+func Int16Property() *Schema {
+	return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int16"}}
+}
+
+// Int32Property creates an int32 property
+func Int32Property() *Schema {
+	return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int32"}}
+}
+
+// Int64Property creates an int64 property
+func Int64Property() *Schema {
+	return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int64"}}
+}
+
+// StrFmtProperty creates a property for the named string format
+func StrFmtProperty(format string) *Schema {
+	return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: format}}
+}
+
+// DateProperty creates a date property
+func DateProperty() *Schema {
+	return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date"}}
+}
+
+// DateTimeProperty creates a date time property
+func DateTimeProperty() *Schema {
+	return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date-time"}}
+}
+
+// MapProperty creates a map property
+func MapProperty(property *Schema) *Schema {
+	return &Schema{SchemaProps: SchemaProps{Type: []string{"object"},
+		AdditionalProperties: &SchemaOrBool{Allows: true, Schema: property}}}
+}
+
+// RefProperty creates a ref property
+func RefProperty(name string) *Schema {
+	return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}}
+}
+
+// RefSchema creates a ref property
+func RefSchema(name string) *Schema {
+	return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}}
+}
+
+// ArrayProperty creates an array property
+func ArrayProperty(items *Schema) *Schema {
+	if items == nil {
+		return &Schema{SchemaProps: SchemaProps{Type: []string{"array"}}}
+	}
+	return &Schema{SchemaProps: SchemaProps{Items: &SchemaOrArray{Schema: items}, Type: []string{"array"}}}
+}
+
+// ComposedSchema creates a schema with allOf
+func ComposedSchema(schemas ...Schema) *Schema {
+	s := new(Schema)
+	s.AllOf = schemas
+	return s
+}
+
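+// Illustrative only (not part of the upstream file): the property
+// constructors above are convenience helpers for common schemas, e.g.
+//
+//    names := ArrayProperty(StringProperty()) // array of strings
+//    ages := MapProperty(Int32Property())     // map of int32 values
+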
+// SchemaURL represents a schema url
+type SchemaURL string
+
+// MarshalJSON marshal this to JSON
+func (r SchemaURL) MarshalJSON() ([]byte, error) {
+	if r == "" {
+		return []byte("{}"), nil
+	}
+	v := map[string]interface{}{"$schema": string(r)}
+	return json.Marshal(v)
+}
+
+// UnmarshalJSON unmarshals this from JSON
+func (r *SchemaURL) UnmarshalJSON(data []byte) error {
+	var v map[string]interface{}
+	if err := json.Unmarshal(data, &v); err != nil {
+		return err
+	}
+	return r.fromMap(v)
+}
+
+func (r *SchemaURL) fromMap(v map[string]interface{}) error {
+	if v == nil {
+		return nil
+	}
+	if vv, ok := v["$schema"]; ok {
+		if str, ok := vv.(string); ok {
+			u, err := url.Parse(str)
+			if err != nil {
+				return err
+			}
+
+			*r = SchemaURL(u.String())
+		}
+	}
+	return nil
+}
+
+// SchemaProps describes a JSON schema (draft 4)
+type SchemaProps struct {
+	ID                   string            `json:"id,omitempty"`
+	Ref                  Ref               `json:"-"`
+	Schema               SchemaURL         `json:"-"`
+	Description          string            `json:"description,omitempty"`
+	Type                 StringOrArray     `json:"type,omitempty"`
+	Nullable             bool              `json:"nullable,omitempty"`
+	Format               string            `json:"format,omitempty"`
+	Title                string            `json:"title,omitempty"`
+	Default              interface{}       `json:"default,omitempty"`
+	Maximum              *float64          `json:"maximum,omitempty"`
+	ExclusiveMaximum     bool              `json:"exclusiveMaximum,omitempty"`
+	Minimum              *float64          `json:"minimum,omitempty"`
+	ExclusiveMinimum     bool              `json:"exclusiveMinimum,omitempty"`
+	MaxLength            *int64            `json:"maxLength,omitempty"`
+	MinLength            *int64            `json:"minLength,omitempty"`
+	Pattern              string            `json:"pattern,omitempty"`
+	MaxItems             *int64            `json:"maxItems,omitempty"`
+	MinItems             *int64            `json:"minItems,omitempty"`
+	UniqueItems          bool              `json:"uniqueItems,omitempty"`
+	MultipleOf           *float64          `json:"multipleOf,omitempty"`
+	Enum                 []interface{}     `json:"enum,omitempty"`
+	MaxProperties        *int64            `json:"maxProperties,omitempty"`
+	MinProperties        *int64            `json:"minProperties,omitempty"`
+	Required             []string          `json:"required,omitempty"`
+	Items                *SchemaOrArray    `json:"items,omitempty"`
+	AllOf                []Schema          `json:"allOf,omitempty"`
+	OneOf                []Schema          `json:"oneOf,omitempty"`
+	AnyOf                []Schema          `json:"anyOf,omitempty"`
+	Not                  *Schema           `json:"not,omitempty"`
+	Properties           map[string]Schema `json:"properties,omitempty"`
+	AdditionalProperties *SchemaOrBool     `json:"additionalProperties,omitempty"`
+	PatternProperties    map[string]Schema `json:"patternProperties,omitempty"`
+	Dependencies         Dependencies      `json:"dependencies,omitempty"`
+	AdditionalItems      *SchemaOrBool     `json:"additionalItems,omitempty"`
+	Definitions          Definitions       `json:"definitions,omitempty"`
+}
+
+// SwaggerSchemaProps are additional properties supported by swagger schemas, but not JSON-schema (draft 4)
+type SwaggerSchemaProps struct {
+	Discriminator string                 `json:"discriminator,omitempty"`
+	ReadOnly      bool                   `json:"readOnly,omitempty"`
+	XML           *XMLObject             `json:"xml,omitempty"`
+	ExternalDocs  *ExternalDocumentation `json:"externalDocs,omitempty"`
+	Example       interface{}            `json:"example,omitempty"`
+}
+
+// Schema the schema object allows the definition of input and output data types.
+// These types can be objects, but also primitives and arrays.
+// This object is based on the [JSON Schema Specification Draft 4](http://json-schema.org/)
+// and uses a predefined subset of it.
+// On top of this subset, there are extensions provided by this specification to allow for more complete documentation.
+//
+// For more information: http://goo.gl/8us55a#schemaObject
+type Schema struct {
+	VendorExtensible
+	SchemaProps
+	SwaggerSchemaProps
+	ExtraProps map[string]interface{} `json:"-"`
+}
+
+// JSONLookup implements an interface to customize json pointer lookup
+func (s Schema) JSONLookup(token string) (interface{}, error) {
+	if ex, ok := s.Extensions[token]; ok {
+		return &ex, nil
+	}
+
+	if ex, ok := s.ExtraProps[token]; ok {
+		return &ex, nil
+	}
+
+	r, _, err := jsonpointer.GetForToken(s.SchemaProps, token)
+	if r != nil || (err != nil && !strings.HasPrefix(err.Error(), "object has no field")) {
+		return r, err
+	}
+	r, _, err = jsonpointer.GetForToken(s.SwaggerSchemaProps, token)
+	return r, err
+}
+
+// WithID sets the id for this schema, allows for chaining
+func (s *Schema) WithID(id string) *Schema {
+	s.ID = id
+	return s
+}
+
+// WithTitle sets the title for this schema, allows for chaining
+func (s *Schema) WithTitle(title string) *Schema {
+	s.Title = title
+	return s
+}
+
+// WithDescription sets the description for this schema, allows for chaining
+func (s *Schema) WithDescription(description string) *Schema {
+	s.Description = description
+	return s
+}
+
+// WithProperties sets the properties for this schema
+func (s *Schema) WithProperties(schemas map[string]Schema) *Schema {
+	s.Properties = schemas
+	return s
+}
+
+// SetProperty sets a property on this schema
+func (s *Schema) SetProperty(name string, schema Schema) *Schema {
+	if s.Properties == nil {
+		s.Properties = make(map[string]Schema)
+	}
+	s.Properties[name] = schema
+	return s
+}
+
+// WithAllOf sets the all of property
+func (s *Schema) WithAllOf(schemas ...Schema) *Schema {
+	s.AllOf = schemas
+	return s
+}
+
+// WithMaxProperties sets the max number of properties an object can have
+func (s *Schema) WithMaxProperties(max int64) *Schema {
+	s.MaxProperties = &max
+	return s
+}
+
+// WithMinProperties sets the min number of properties an object must have
+func (s *Schema) WithMinProperties(min int64) *Schema {
+	s.MinProperties = &min
+	return s
+}
+
+// Typed sets the type of this schema for a single value item
+func (s *Schema) Typed(tpe, format string) *Schema {
+	s.Type = []string{tpe}
+	s.Format = format
+	return s
+}
+
+// AddType adds a type with potential format to the types for this schema
+func (s *Schema) AddType(tpe, format string) *Schema {
+	s.Type = append(s.Type, tpe)
+	if format != "" {
+		s.Format = format
+	}
+	return s
+}
+
+// AsNullable flags this schema as nullable.
+func (s *Schema) AsNullable() *Schema {
+	s.Nullable = true
+	return s
+}
+
+// CollectionOf is a fluent builder method for an array parameter
+func (s *Schema) CollectionOf(items Schema) *Schema {
+	s.Type = []string{jsonArray}
+	s.Items = &SchemaOrArray{Schema: &items}
+	return s
+}
+
+// WithDefault sets the default value on this schema
+func (s *Schema) WithDefault(defaultValue interface{}) *Schema {
+	s.Default = defaultValue
+	return s
+}
+
+// WithRequired sets the required property names for this schema
+func (s *Schema) WithRequired(items ...string) *Schema {
+	s.Required = items
+	return s
+}
+
+// AddRequired adds field names to the required properties array
+func (s *Schema) AddRequired(items ...string) *Schema {
+	s.Required = append(s.Required, items...)
+	return s
+}
+
+// WithMaxLength sets a max length value
+func (s *Schema) WithMaxLength(max int64) *Schema {
+	s.MaxLength = &max
+	return s
+}
+
+// WithMinLength sets a min length value
+func (s *Schema) WithMinLength(min int64) *Schema {
+	s.MinLength = &min
+	return s
+}
+
+// WithPattern sets a pattern value
+func (s *Schema) WithPattern(pattern string) *Schema {
+	s.Pattern = pattern
+	return s
+}
+
+// WithMultipleOf sets a multiple of value
+func (s *Schema) WithMultipleOf(number float64) *Schema {
+	s.MultipleOf = &number
+	return s
+}
+
+// WithMaximum sets a maximum number value
+func (s *Schema) WithMaximum(max float64, exclusive bool) *Schema {
+	s.Maximum = &max
+	s.ExclusiveMaximum = exclusive
+	return s
+}
+
+// WithMinimum sets a minimum number value
+func (s *Schema) WithMinimum(min float64, exclusive bool) *Schema {
+	s.Minimum = &min
+	s.ExclusiveMinimum = exclusive
+	return s
+}
+
+// WithEnum sets the enum values (replace)
+func (s *Schema) WithEnum(values ...interface{}) *Schema {
+	s.Enum = append([]interface{}{}, values...)
+	return s
+}
+
+// WithMaxItems sets the max items
+func (s *Schema) WithMaxItems(size int64) *Schema {
+	s.MaxItems = &size
+	return s
+}
+
+// WithMinItems sets the min items
+func (s *Schema) WithMinItems(size int64) *Schema {
+	s.MinItems = &size
+	return s
+}
+
+// UniqueValues dictates that this array can only have unique items
+func (s *Schema) UniqueValues() *Schema {
+	s.UniqueItems = true
+	return s
+}
+
+// AllowDuplicates this array can have duplicates
+func (s *Schema) AllowDuplicates() *Schema {
+	s.UniqueItems = false
+	return s
+}
+
+// AddToAllOf adds a schema to the allOf property
+func (s *Schema) AddToAllOf(schemas ...Schema) *Schema {
+	s.AllOf = append(s.AllOf, schemas...)
+	return s
+}
+
+// WithDiscriminator sets the name of the discriminator field
+func (s *Schema) WithDiscriminator(discriminator string) *Schema {
+	s.Discriminator = discriminator
+	return s
+}
+
+// AsReadOnly flags this schema as readonly
+func (s *Schema) AsReadOnly() *Schema {
+	s.ReadOnly = true
+	return s
+}
+
+// AsWritable flags this schema as writeable (not read-only)
+func (s *Schema) AsWritable() *Schema {
+	s.ReadOnly = false
+	return s
+}
+
+// WithExample sets the example for this schema
+func (s *Schema) WithExample(example interface{}) *Schema {
+	s.Example = example
+	return s
+}
+
+// WithExternalDocs sets/removes the external docs for/from this schema.
+// When you pass empty strings as params the external docs will be removed.
+// When you pass a non-empty value for either param, both values will be set on the external docs object.
+// So when you pass a non-empty description, you should also pass the url and vice versa.
+func (s *Schema) WithExternalDocs(description, url string) *Schema {
+	if description == "" && url == "" {
+		s.ExternalDocs = nil
+		return s
+	}
+
+	if s.ExternalDocs == nil {
+		s.ExternalDocs = &ExternalDocumentation{}
+	}
+	s.ExternalDocs.Description = description
+	s.ExternalDocs.URL = url
+	return s
+}
+
+// WithXMLName sets the xml name for the object
+func (s *Schema) WithXMLName(name string) *Schema {
+	if s.XML == nil {
+		s.XML = new(XMLObject)
+	}
+	s.XML.Name = name
+	return s
+}
+
+// WithXMLNamespace sets the xml namespace for the object
+func (s *Schema) WithXMLNamespace(namespace string) *Schema {
+	if s.XML == nil {
+		s.XML = new(XMLObject)
+	}
+	s.XML.Namespace = namespace
+	return s
+}
+
+// WithXMLPrefix sets the xml prefix for the object
+func (s *Schema) WithXMLPrefix(prefix string) *Schema {
+	if s.XML == nil {
+		s.XML = new(XMLObject)
+	}
+	s.XML.Prefix = prefix
+	return s
+}
+
+// AsXMLAttribute flags this object as xml attribute
+func (s *Schema) AsXMLAttribute() *Schema {
+	if s.XML == nil {
+		s.XML = new(XMLObject)
+	}
+	s.XML.Attribute = true
+	return s
+}
+
+// AsXMLElement flags this object as an xml node
+func (s *Schema) AsXMLElement() *Schema {
+	if s.XML == nil {
+		s.XML = new(XMLObject)
+	}
+	s.XML.Attribute = false
+	return s
+}
+
+// AsWrappedXML flags this object as wrapped; this is mostly useful for array types
+func (s *Schema) AsWrappedXML() *Schema {
+	if s.XML == nil {
+		s.XML = new(XMLObject)
+	}
+	s.XML.Wrapped = true
+	return s
+}
+
+// AsUnwrappedXML flags this object as unwrapped
+func (s *Schema) AsUnwrappedXML() *Schema {
+	if s.XML == nil {
+		s.XML = new(XMLObject)
+	}
+	s.XML.Wrapped = false
+	return s
+}
+
+// MarshalJSON marshals this to JSON
+func (s Schema) MarshalJSON() ([]byte, error) {
+	b1, err := json.Marshal(s.SchemaProps)
+	if err != nil {
+		return nil, fmt.Errorf("schema props %v", err)
+	}
+	b2, err := json.Marshal(s.VendorExtensible)
+	if err != nil {
+		return nil, fmt.Errorf("vendor props %v", err)
+	}
+	b3, err := s.Ref.MarshalJSON()
+	if err != nil {
+		return nil, fmt.Errorf("ref prop %v", err)
+	}
+	b4, err := s.Schema.MarshalJSON()
+	if err != nil {
+		return nil, fmt.Errorf("schema prop %v", err)
+	}
+	b5, err := json.Marshal(s.SwaggerSchemaProps)
+	if err != nil {
+		return nil, fmt.Errorf("common validations %v", err)
+	}
+	var b6 []byte
+	if s.ExtraProps != nil {
+		jj, err := json.Marshal(s.ExtraProps)
+		if err != nil {
+			return nil, fmt.Errorf("extra props %v", err)
+		}
+		b6 = jj
+	}
+	return swag.ConcatJSON(b1, b2, b3, b4, b5, b6), nil
+}
+
+// UnmarshalJSON unmarshals this from JSON
+func (s *Schema) UnmarshalJSON(data []byte) error {
+	props := struct {
+		SchemaProps
+		SwaggerSchemaProps
+	}{}
+	if err := json.Unmarshal(data, &props); err != nil {
+		return err
+	}
+
+	sch := Schema{
+		SchemaProps:        props.SchemaProps,
+		SwaggerSchemaProps: props.SwaggerSchemaProps,
+	}
+
+	var d map[string]interface{}
+	if err := json.Unmarshal(data, &d); err != nil {
+		return err
+	}
+
+	_ = sch.Ref.fromMap(d)
+	_ = sch.Schema.fromMap(d)
+
+	delete(d, "$ref")
+	delete(d, "$schema")
+	for _, pn := range swag.DefaultJSONNameProvider.GetJSONNames(s) {
+		delete(d, pn)
+	}
+
+	for k, vv := range d {
+		lk := strings.ToLower(k)
+		if strings.HasPrefix(lk, "x-") {
+			if sch.Extensions == nil {
+				sch.Extensions = map[string]interface{}{}
+			}
+			sch.Extensions[k] = vv
+			continue
+		}
+		if sch.ExtraProps == nil {
+			sch.ExtraProps = map[string]interface{}{}
+		}
+		sch.ExtraProps[k] = vv
+	}
+
+	*s = sch
+
+	return nil
+}
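The fluent builders above all return *Schema, so a definition can be assembled in one chain and serialized through the custom MarshalJSON defined in this file. Below is a minimal usage sketch, assuming the package is imported as github.com/go-openapi/spec; the property names and constraints are illustrative only.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// A string property with a minimum length.
	name := spec.Schema{}
	name.Typed("string", "").WithMinLength(1)

	// An object schema assembled through the fluent setters defined above.
	user := (&spec.Schema{}).
		Typed("object", "").
		WithTitle("User").
		SetProperty("name", name).
		WithRequired("name")

	b, _ := json.Marshal(user) // goes through Schema.MarshalJSON
	fmt.Println(string(b))
}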
diff --git a/vendor/github.com/go-openapi/spec/schema_loader.go b/vendor/github.com/go-openapi/spec/schema_loader.go
new file mode 100644
index 00000000..9e20e96c
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/schema_loader.go
@@ -0,0 +1,276 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"net/url"
+	"reflect"
+	"strings"
+
+	"github.com/go-openapi/swag"
+)
+
+// PathLoader function to use when loading remote refs
+var PathLoader func(string) (json.RawMessage, error)
+
+func init() {
+	PathLoader = func(path string) (json.RawMessage, error) {
+		data, err := swag.LoadFromFileOrHTTP(path)
+		if err != nil {
+			return nil, err
+		}
+		return json.RawMessage(data), nil
+	}
+}
+
+// resolverContext allows sharing a context during spec processing.
+// At the moment, it just holds the index of circular references found.
+type resolverContext struct {
+	// circulars holds all visited circular references, which allows shortcuts.
+	// NOTE: this is not just a performance improvement: it is required to figure out
+	// circular references which participate several cycles.
+// This structure is privately instantiated and need not be locked against
+// concurrent access, unless we choose to implement parallel spec walking.
+	circulars map[string]bool
+	basePath  string
+}
+
+func newResolverContext(originalBasePath string) *resolverContext {
+	return &resolverContext{
+		circulars: make(map[string]bool),
+		basePath:  originalBasePath, // keep the root base path in context
+	}
+}
+
+type schemaLoader struct {
+	root    interface{}
+	options *ExpandOptions
+	cache   ResolutionCache
+	context *resolverContext
+	loadDoc func(string) (json.RawMessage, error)
+}
+
+func (r *schemaLoader) transitiveResolver(basePath string, ref Ref) (*schemaLoader, error) {
+	if ref.IsRoot() || ref.HasFragmentOnly {
+		return r, nil
+	}
+
+	baseRef, _ := NewRef(basePath)
+	currentRef := normalizeFileRef(&ref, basePath)
+	if strings.HasPrefix(currentRef.String(), baseRef.String()) {
+		return r, nil
+	}
+
+	// Set a new root to resolve against
+	rootURL := currentRef.GetURL()
+	rootURL.Fragment = ""
+	root, _ := r.cache.Get(rootURL.String())
+
+	// shallow copy of resolver options to set a new RelativeBase when
+	// traversing multiple documents
+	newOptions := r.options
+	newOptions.RelativeBase = rootURL.String()
+	debugLog("setting new root: %s", newOptions.RelativeBase)
+	resolver, err := defaultSchemaLoader(root, newOptions, r.cache, r.context)
+	if err != nil {
+		return nil, err
+	}
+
+	return resolver, nil
+}
+
+func (r *schemaLoader) updateBasePath(transitive *schemaLoader, basePath string) string {
+	if transitive != r {
+		debugLog("got a new resolver")
+		if transitive.options != nil && transitive.options.RelativeBase != "" {
+			basePath, _ = absPath(transitive.options.RelativeBase)
+			debugLog("new basePath = %s", basePath)
+		}
+	}
+	return basePath
+}
+
+func (r *schemaLoader) resolveRef(ref *Ref, target interface{}, basePath string) error {
+	tgt := reflect.ValueOf(target)
+	if tgt.Kind() != reflect.Ptr {
+		return fmt.Errorf("resolve ref: target needs to be a pointer")
+	}
+
+	refURL := ref.GetURL()
+	if refURL == nil {
+		return nil
+	}
+
+	var res interface{}
+	var data interface{}
+	var err error
+	// Resolve against the root if it isn't nil, and if ref is pointing at the root, or has a fragment only which means
+	// it is pointing somewhere in the root.
+	root := r.root
+	if (ref.IsRoot() || ref.HasFragmentOnly) && root == nil && basePath != "" {
+		if baseRef, erb := NewRef(basePath); erb == nil {
+			root, _, _, _ = r.load(baseRef.GetURL())
+		}
+	}
+	if (ref.IsRoot() || ref.HasFragmentOnly) && root != nil {
+		data = root
+	} else {
+		baseRef := normalizeFileRef(ref, basePath)
+		debugLog("current ref is: %s", ref.String())
+		debugLog("current ref normalized file: %s", baseRef.String())
+		data, _, _, err = r.load(baseRef.GetURL())
+		if err != nil {
+			return err
+		}
+	}
+
+	res = data
+	if ref.String() != "" {
+		res, _, err = ref.GetPointer().Get(data)
+		if err != nil {
+			return err
+		}
+	}
+	return swag.DynamicJSONToStruct(res, target)
+}
+
+func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) {
+	debugLog("loading schema from url: %s", refURL)
+	toFetch := *refURL
+	toFetch.Fragment = ""
+
+	normalized := normalizeAbsPath(toFetch.String())
+
+	data, fromCache := r.cache.Get(normalized)
+	if !fromCache {
+		b, err := r.loadDoc(normalized)
+		if err != nil {
+			debugLog("unable to load the document: %v", err)
+			return nil, url.URL{}, false, err
+		}
+
+		if err := json.Unmarshal(b, &data); err != nil {
+			return nil, url.URL{}, false, err
+		}
+		r.cache.Set(normalized, data)
+	}
+
+	return data, toFetch, fromCache, nil
+}
+
+// isCircular detects cycles in sequences of $ref.
+// It relies on a private context (which need not be locked).
+func (r *schemaLoader) isCircular(ref *Ref, basePath string, parentRefs ...string) (foundCycle bool) {
+	normalizedRef := normalizePaths(ref.String(), basePath)
+	if _, ok := r.context.circulars[normalizedRef]; ok {
+		// circular $ref has been already detected in another explored cycle
+		foundCycle = true
+		return
+	}
+	foundCycle = swag.ContainsStringsCI(parentRefs, normalizedRef)
+	if foundCycle {
+		r.context.circulars[normalizedRef] = true
+	}
+	return
+}
+
+// Resolve resolves a reference against basePath and stores the result in target.
+// Resolve is not in charge of following references: it only resolves ref by following its URL.
+// If the schema that ref points to contains further refs, Resolve does not resolve them.
+// If basePath is an empty string, ref is resolved against the root schema stored in the schemaLoader struct.
+func (r *schemaLoader) Resolve(ref *Ref, target interface{}, basePath string) error {
+	return r.resolveRef(ref, target, basePath)
+}
+
+func (r *schemaLoader) deref(input interface{}, parentRefs []string, basePath string) error {
+	var ref *Ref
+	switch refable := input.(type) {
+	case *Schema:
+		ref = &refable.Ref
+	case *Parameter:
+		ref = &refable.Ref
+	case *Response:
+		ref = &refable.Ref
+	case *PathItem:
+		ref = &refable.Ref
+	default:
+		return fmt.Errorf("deref: unsupported type %T", input)
+	}
+
+	curRef := ref.String()
+	if curRef != "" {
+		normalizedRef := normalizeFileRef(ref, basePath)
+		normalizedBasePath := normalizedRef.RemoteURI()
+
+		if r.isCircular(normalizedRef, basePath, parentRefs...) {
+			return nil
+		}
+
+		if err := r.resolveRef(ref, input, basePath); r.shouldStopOnError(err) {
+			return err
+		}
+
+		// NOTE(fredbi): removed basePath check => needs more testing
+		if ref.String() != "" && ref.String() != curRef {
+			parentRefs = append(parentRefs, normalizedRef.String())
+			return r.deref(input, parentRefs, normalizedBasePath)
+		}
+	}
+
+	return nil
+}
+
+func (r *schemaLoader) shouldStopOnError(err error) bool {
+	if err != nil && !r.options.ContinueOnError {
+		return true
+	}
+
+	if err != nil {
+		log.Println(err)
+	}
+
+	return false
+}
+
+func defaultSchemaLoader(
+	root interface{},
+	expandOptions *ExpandOptions,
+	cache ResolutionCache,
+	context *resolverContext) (*schemaLoader, error) {
+
+	if cache == nil {
+		cache = resCache
+	}
+	if expandOptions == nil {
+		expandOptions = &ExpandOptions{}
+	}
+	absBase, _ := absPath(expandOptions.RelativeBase)
+	if context == nil {
+		context = newResolverContext(absBase)
+	}
+	return &schemaLoader{
+		root:    root,
+		options: expandOptions,
+		cache:   cache,
+		context: context,
+		loadDoc: func(path string) (json.RawMessage, error) {
+			debugLog("fetching document at %q", path)
+			return PathLoader(path)
+		},
+	}, nil
+}
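PathLoader is the package-level hook the loader above uses to fetch referenced documents; by default it goes through swag.LoadFromFileOrHTTP. Below is a minimal sketch of pointing it at in-memory fixtures instead (for example in tests); the fixture path and contents are hypothetical.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// Hypothetical in-memory documents keyed by the path a $ref would resolve to.
	fixtures := map[string]json.RawMessage{
		"definitions.json": json.RawMessage(`{"definitions":{"name":{"type":"string"}}}`),
	}

	// Replace the default file/HTTP loader with a lookup into the fixtures.
	spec.PathLoader = func(path string) (json.RawMessage, error) {
		doc, ok := fixtures[path]
		if !ok {
			return nil, fmt.Errorf("no fixture for %q", path)
		}
		return doc, nil
	}

	raw, err := spec.PathLoader("definitions.json")
	fmt.Println(string(raw), err)
}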
diff --git a/vendor/github.com/go-openapi/spec/security_scheme.go b/vendor/github.com/go-openapi/spec/security_scheme.go
new file mode 100644
index 00000000..fe353842
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/security_scheme.go
@@ -0,0 +1,140 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+	"encoding/json"
+
+	"github.com/go-openapi/jsonpointer"
+	"github.com/go-openapi/swag"
+)
+
+const (
+	basic       = "basic"
+	apiKey      = "apiKey"
+	oauth2      = "oauth2"
+	implicit    = "implicit"
+	password    = "password"
+	application = "application"
+	accessCode  = "accessCode"
+)
+
+// BasicAuth creates a basic auth security scheme
+func BasicAuth() *SecurityScheme {
+	return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: basic}}
+}
+
+// APIKeyAuth creates an api key auth security scheme
+func APIKeyAuth(fieldName, valueSource string) *SecurityScheme {
+	return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: apiKey, Name: fieldName, In: valueSource}}
+}
+
+// OAuth2Implicit creates an implicit flow oauth2 security scheme
+func OAuth2Implicit(authorizationURL string) *SecurityScheme {
+	return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{
+		Type:             oauth2,
+		Flow:             implicit,
+		AuthorizationURL: authorizationURL,
+	}}
+}
+
+// OAuth2Password creates a password flow oauth2 security scheme
+func OAuth2Password(tokenURL string) *SecurityScheme {
+	return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{
+		Type:     oauth2,
+		Flow:     password,
+		TokenURL: tokenURL,
+	}}
+}
+
+// OAuth2Application creates an application flow oauth2 security scheme
+func OAuth2Application(tokenURL string) *SecurityScheme {
+	return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{
+		Type:     oauth2,
+		Flow:     application,
+		TokenURL: tokenURL,
+	}}
+}
+
+// OAuth2AccessToken creates an access token flow oauth2 security scheme
+func OAuth2AccessToken(authorizationURL, tokenURL string) *SecurityScheme {
+	return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{
+		Type:             oauth2,
+		Flow:             accessCode,
+		AuthorizationURL: authorizationURL,
+		TokenURL:         tokenURL,
+	}}
+}
+
+// SecuritySchemeProps describes a swagger security scheme in the securityDefinitions section
+type SecuritySchemeProps struct {
+	Description      string            `json:"description,omitempty"`
+	Type             string            `json:"type"`
+	Name             string            `json:"name,omitempty"`             // api key
+	In               string            `json:"in,omitempty"`               // api key
+	Flow             string            `json:"flow,omitempty"`             // oauth2
+	AuthorizationURL string            `json:"authorizationUrl,omitempty"` // oauth2
+	TokenURL         string            `json:"tokenUrl,omitempty"`         // oauth2
+	Scopes           map[string]string `json:"scopes,omitempty"`           // oauth2
+}
+
+// AddScope adds a scope to this security scheme
+func (s *SecuritySchemeProps) AddScope(scope, description string) {
+	if s.Scopes == nil {
+		s.Scopes = make(map[string]string)
+	}
+	s.Scopes[scope] = description
+}
+
+// SecurityScheme allows the definition of a security scheme that can be used by the operations.
+// Supported schemes are basic authentication, an API key (either as a header or as a query parameter)
+// and OAuth2's common flows (implicit, password, application and access code).
+//
+// For more information: http://goo.gl/8us55a#securitySchemeObject
+type SecurityScheme struct {
+	VendorExtensible
+	SecuritySchemeProps
+}
+
+// JSONLookup implements an interface to customize json pointer lookup
+func (s SecurityScheme) JSONLookup(token string) (interface{}, error) {
+	if ex, ok := s.Extensions[token]; ok {
+		return &ex, nil
+	}
+
+	r, _, err := jsonpointer.GetForToken(s.SecuritySchemeProps, token)
+	return r, err
+}
+
+// MarshalJSON marshals this to JSON
+func (s SecurityScheme) MarshalJSON() ([]byte, error) {
+	b1, err := json.Marshal(s.SecuritySchemeProps)
+	if err != nil {
+		return nil, err
+	}
+	b2, err := json.Marshal(s.VendorExtensible)
+	if err != nil {
+		return nil, err
+	}
+	return swag.ConcatJSON(b1, b2), nil
+}
+
+// UnmarshalJSON unmarshals this from JSON
+func (s *SecurityScheme) UnmarshalJSON(data []byte) error {
+	if err := json.Unmarshal(data, &s.SecuritySchemeProps); err != nil {
+		return err
+	}
+	return json.Unmarshal(data, &s.VendorExtensible)
+}
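The constructors above cover the swagger 2.0 security scheme kinds, and together with SecurityDefinitions (declared later in this package, in swagger.go) they can populate the securityDefinitions section. A minimal sketch follows, with illustrative URLs, header name, and scope.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// An implicit-flow OAuth2 scheme with a single scope.
	oauth := spec.OAuth2Implicit("https://example.com/oauth/authorize")
	oauth.AddScope("read:items", "read access to items")

	// securityDefinitions maps arbitrary names to schemes.
	defs := spec.SecurityDefinitions{
		"basicAuth": spec.BasicAuth(),
		"apiKey":    spec.APIKeyAuth("X-API-Key", "header"),
		"oauth":     oauth,
	}

	b, _ := json.Marshal(defs)
	fmt.Println(string(b))
}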
diff --git a/vendor/github.com/go-openapi/spec/spec.go b/vendor/github.com/go-openapi/spec/spec.go
new file mode 100644
index 00000000..0bb045bc
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/spec.go
@@ -0,0 +1,86 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import "encoding/json"
+
+//go:generate curl -L --progress -o ./schemas/v2/schema.json http://swagger.io/v2/schema.json
+//go:generate curl -L --progress  -o ./schemas/jsonschema-draft-04.json http://json-schema.org/draft-04/schema
+//go:generate go-bindata -pkg=spec -prefix=./schemas -ignore=.*\.md ./schemas/...
+//go:generate perl -pi -e s,Json,JSON,g bindata.go
+
+const (
+	// SwaggerSchemaURL the url for the swagger 2.0 schema to validate specs
+	SwaggerSchemaURL = "http://swagger.io/v2/schema.json#"
+	// JSONSchemaURL the url for the json schema schema
+	JSONSchemaURL = "http://json-schema.org/draft-04/schema#"
+)
+
+var (
+	jsonSchema    *Schema
+	swaggerSchema *Schema
+)
+
+func init() {
+	jsonSchema = MustLoadJSONSchemaDraft04()
+	swaggerSchema = MustLoadSwagger20Schema()
+}
+
+// MustLoadJSONSchemaDraft04 panics when JSONSchemaDraft04 returns an error
+func MustLoadJSONSchemaDraft04() *Schema {
+	d, e := JSONSchemaDraft04()
+	if e != nil {
+		panic(e)
+	}
+	return d
+}
+
+// JSONSchemaDraft04 loads the json schema document for json schema draft 04
+func JSONSchemaDraft04() (*Schema, error) {
+	b, err := Asset("jsonschema-draft-04.json")
+	if err != nil {
+		return nil, err
+	}
+
+	schema := new(Schema)
+	if err := json.Unmarshal(b, schema); err != nil {
+		return nil, err
+	}
+	return schema, nil
+}
+
+// MustLoadSwagger20Schema panics when Swagger20Schema returns an error
+func MustLoadSwagger20Schema() *Schema {
+	d, e := Swagger20Schema()
+	if e != nil {
+		panic(e)
+	}
+	return d
+}
+
+// Swagger20Schema loads the swagger 2.0 schema from the embedded assets
+func Swagger20Schema() (*Schema, error) {
+
+	b, err := Asset("v2/schema.json")
+	if err != nil {
+		return nil, err
+	}
+
+	schema := new(Schema)
+	if err := json.Unmarshal(b, schema); err != nil {
+		return nil, err
+	}
+	return schema, nil
+}
diff --git a/vendor/github.com/go-openapi/spec/swagger.go b/vendor/github.com/go-openapi/spec/swagger.go
new file mode 100644
index 00000000..44722ffd
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/swagger.go
@@ -0,0 +1,448 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+	"bytes"
+	"encoding/gob"
+	"encoding/json"
+	"fmt"
+	"strconv"
+
+	"github.com/go-openapi/jsonpointer"
+	"github.com/go-openapi/swag"
+)
+
+// Swagger this is the root document object for the API specification.
+// It combines what previously was the Resource Listing and API Declaration (version 1.2 and earlier)
+// together into one document.
+//
+// For more information: http://goo.gl/8us55a#swagger-object-
+type Swagger struct {
+	VendorExtensible
+	SwaggerProps
+}
+
+// JSONLookup look up a value by the json property name
+func (s Swagger) JSONLookup(token string) (interface{}, error) {
+	if ex, ok := s.Extensions[token]; ok {
+		return &ex, nil
+	}
+	r, _, err := jsonpointer.GetForToken(s.SwaggerProps, token)
+	return r, err
+}
+
+// MarshalJSON marshals this swagger structure to json
+func (s Swagger) MarshalJSON() ([]byte, error) {
+	b1, err := json.Marshal(s.SwaggerProps)
+	if err != nil {
+		return nil, err
+	}
+	b2, err := json.Marshal(s.VendorExtensible)
+	if err != nil {
+		return nil, err
+	}
+	return swag.ConcatJSON(b1, b2), nil
+}
+
+// UnmarshalJSON unmarshals a swagger spec from json
+func (s *Swagger) UnmarshalJSON(data []byte) error {
+	var sw Swagger
+	if err := json.Unmarshal(data, &sw.SwaggerProps); err != nil {
+		return err
+	}
+	if err := json.Unmarshal(data, &sw.VendorExtensible); err != nil {
+		return err
+	}
+	*s = sw
+	return nil
+}
+
+// GobEncode provides a safe gob encoder for Swagger, including extensions
+func (s Swagger) GobEncode() ([]byte, error) {
+	var b bytes.Buffer
+	raw := struct {
+		Props SwaggerProps
+		Ext   VendorExtensible
+	}{
+		Props: s.SwaggerProps,
+		Ext:   s.VendorExtensible,
+	}
+	err := gob.NewEncoder(&b).Encode(raw)
+	return b.Bytes(), err
+}
+
+// GobDecode provides a safe gob decoder for Swagger, including extensions
+func (s *Swagger) GobDecode(b []byte) error {
+	var raw struct {
+		Props SwaggerProps
+		Ext   VendorExtensible
+	}
+	buf := bytes.NewBuffer(b)
+	err := gob.NewDecoder(buf).Decode(&raw)
+	if err != nil {
+		return err
+	}
+	s.SwaggerProps = raw.Props
+	s.VendorExtensible = raw.Ext
+	return nil
+}
+
+// SwaggerProps captures the top-level properties of an API specification
+//
+// NOTE: validation rules
+// - the scheme, when present must be from [http, https, ws, wss]
+// - BasePath must start with a leading "/"
+// - Paths is required
+type SwaggerProps struct {
+	ID                  string                 `json:"id,omitempty"`
+	Consumes            []string               `json:"consumes,omitempty"`
+	Produces            []string               `json:"produces,omitempty"`
+	Schemes             []string               `json:"schemes,omitempty"`
+	Swagger             string                 `json:"swagger,omitempty"`
+	Info                *Info                  `json:"info,omitempty"`
+	Host                string                 `json:"host,omitempty"`
+	BasePath            string                 `json:"basePath,omitempty"`
+	Paths               *Paths                 `json:"paths"`
+	Definitions         Definitions            `json:"definitions,omitempty"`
+	Parameters          map[string]Parameter   `json:"parameters,omitempty"`
+	Responses           map[string]Response    `json:"responses,omitempty"`
+	SecurityDefinitions SecurityDefinitions    `json:"securityDefinitions,omitempty"`
+	Security            []map[string][]string  `json:"security,omitempty"`
+	Tags                []Tag                  `json:"tags,omitempty"`
+	ExternalDocs        *ExternalDocumentation `json:"externalDocs,omitempty"`
+}
+
+type swaggerPropsAlias SwaggerProps
+
+type gobSwaggerPropsAlias struct {
+	Security []map[string]struct {
+		List []string
+		Pad  bool
+	}
+	Alias           *swaggerPropsAlias
+	SecurityIsEmpty bool
+}
+
+// GobEncode provides a safe gob encoder for SwaggerProps, including empty security requirements
+func (o SwaggerProps) GobEncode() ([]byte, error) {
+	raw := gobSwaggerPropsAlias{
+		Alias: (*swaggerPropsAlias)(&o),
+	}
+
+	var b bytes.Buffer
+	if o.Security == nil {
+		// nil security requirement
+		err := gob.NewEncoder(&b).Encode(raw)
+		return b.Bytes(), err
+	}
+
+	if len(o.Security) == 0 {
+		// empty, but non-nil security requirement
+		raw.SecurityIsEmpty = true
+		raw.Alias.Security = nil
+		err := gob.NewEncoder(&b).Encode(raw)
+		return b.Bytes(), err
+	}
+
+	raw.Security = make([]map[string]struct {
+		List []string
+		Pad  bool
+	}, 0, len(o.Security))
+	for _, req := range o.Security {
+		v := make(map[string]struct {
+			List []string
+			Pad  bool
+		}, len(req))
+		for k, val := range req {
+			v[k] = struct {
+				List []string
+				Pad  bool
+			}{
+				List: val,
+			}
+		}
+		raw.Security = append(raw.Security, v)
+	}
+
+	err := gob.NewEncoder(&b).Encode(raw)
+	return b.Bytes(), err
+}
+
+// GobDecode provides a safe gob decoder for SwaggerProps, including empty security requirements
+func (o *SwaggerProps) GobDecode(b []byte) error {
+	var raw gobSwaggerPropsAlias
+
+	buf := bytes.NewBuffer(b)
+	err := gob.NewDecoder(buf).Decode(&raw)
+	if err != nil {
+		return err
+	}
+	if raw.Alias == nil {
+		return nil
+	}
+
+	switch {
+	case raw.SecurityIsEmpty:
+		// empty, but non-nil security requirement
+		raw.Alias.Security = []map[string][]string{}
+	case len(raw.Alias.Security) == 0:
+		// nil security requirement
+		raw.Alias.Security = nil
+	default:
+		raw.Alias.Security = make([]map[string][]string, 0, len(raw.Security))
+		for _, req := range raw.Security {
+			v := make(map[string][]string, len(req))
+			for k, val := range req {
+				v[k] = make([]string, 0, len(val.List))
+				v[k] = append(v[k], val.List...)
+			}
+			raw.Alias.Security = append(raw.Alias.Security, v)
+		}
+	}
+
+	*o = *(*SwaggerProps)(raw.Alias)
+	return nil
+}
+
+// Dependencies represents a dependencies property
+type Dependencies map[string]SchemaOrStringArray
+
+// SchemaOrBool represents a schema or boolean value; it is biased towards true for the boolean property
+type SchemaOrBool struct {
+	Allows bool
+	Schema *Schema
+}
+
+// JSONLookup implements an interface to customize json pointer lookup
+func (s SchemaOrBool) JSONLookup(token string) (interface{}, error) {
+	if token == "allows" {
+		return s.Allows, nil
+	}
+	r, _, err := jsonpointer.GetForToken(s.Schema, token)
+	return r, err
+}
+
+var jsTrue = []byte("true")
+var jsFalse = []byte("false")
+
+// MarshalJSON converts this object to JSON
+func (s SchemaOrBool) MarshalJSON() ([]byte, error) {
+	if s.Schema != nil {
+		return json.Marshal(s.Schema)
+	}
+
+	if s.Schema == nil && !s.Allows {
+		return jsFalse, nil
+	}
+	return jsTrue, nil
+}
+
+// UnmarshalJSON converts this bool or schema object from a JSON structure
+func (s *SchemaOrBool) UnmarshalJSON(data []byte) error {
+	var nw SchemaOrBool
+	if len(data) >= 4 {
+		if data[0] == '{' {
+			var sch Schema
+			if err := json.Unmarshal(data, &sch); err != nil {
+				return err
+			}
+			nw.Schema = &sch
+		}
+		nw.Allows = !(data[0] == 'f' && data[1] == 'a' && data[2] == 'l' && data[3] == 's' && data[4] == 'e')
+	}
+	*s = nw
+	return nil
+}
+
+// SchemaOrStringArray represents a schema or a string array
+type SchemaOrStringArray struct {
+	Schema   *Schema
+	Property []string
+}
+
+// JSONLookup implements an interface to customize json pointer lookup
+func (s SchemaOrStringArray) JSONLookup(token string) (interface{}, error) {
+	r, _, err := jsonpointer.GetForToken(s.Schema, token)
+	return r, err
+}
+
+// MarshalJSON converts this schema object or array into JSON structure
+func (s SchemaOrStringArray) MarshalJSON() ([]byte, error) {
+	if len(s.Property) > 0 {
+		return json.Marshal(s.Property)
+	}
+	if s.Schema != nil {
+		return json.Marshal(s.Schema)
+	}
+	return []byte("null"), nil
+}
+
+// UnmarshalJSON converts this schema object or array from a JSON structure
+func (s *SchemaOrStringArray) UnmarshalJSON(data []byte) error {
+	var first byte
+	if len(data) > 1 {
+		first = data[0]
+	}
+	var nw SchemaOrStringArray
+	if first == '{' {
+		var sch Schema
+		if err := json.Unmarshal(data, &sch); err != nil {
+			return err
+		}
+		nw.Schema = &sch
+	}
+	if first == '[' {
+		if err := json.Unmarshal(data, &nw.Property); err != nil {
+			return err
+		}
+	}
+	*s = nw
+	return nil
+}
+
+// Definitions contains the models explicitly defined in this spec.
+// An object to hold data types that can be consumed and produced by operations.
+// These data types can be primitives, arrays or models.
+//
+// For more information: http://goo.gl/8us55a#definitionsObject
+type Definitions map[string]Schema
+
+// SecurityDefinitions a declaration of the security schemes available to be used in the specification.
+// This does not enforce the security schemes on the operations and only serves to provide
+// the relevant details for each scheme.
+//
+// For more information: http://goo.gl/8us55a#securityDefinitionsObject
+type SecurityDefinitions map[string]*SecurityScheme
+
+// StringOrArray represents a value that can either be a string
+// or an array of strings. Mainly here for serialization purposes
+type StringOrArray []string
+
+// Contains returns true when the value is contained in the slice
+func (s StringOrArray) Contains(value string) bool {
+	for _, str := range s {
+		if str == value {
+			return true
+		}
+	}
+	return false
+}
+
+// JSONLookup implements an interface to customize json pointer lookup
+func (s SchemaOrArray) JSONLookup(token string) (interface{}, error) {
+	if _, err := strconv.Atoi(token); err == nil {
+		r, _, err := jsonpointer.GetForToken(s.Schemas, token)
+		return r, err
+	}
+	r, _, err := jsonpointer.GetForToken(s.Schema, token)
+	return r, err
+}
+
+// UnmarshalJSON unmarshals this string or array object from a JSON array or JSON string
+func (s *StringOrArray) UnmarshalJSON(data []byte) error {
+	var first byte
+	if len(data) > 1 {
+		first = data[0]
+	}
+
+	if first == '[' {
+		var parsed []string
+		if err := json.Unmarshal(data, &parsed); err != nil {
+			return err
+		}
+		*s = StringOrArray(parsed)
+		return nil
+	}
+
+	var single interface{}
+	if err := json.Unmarshal(data, &single); err != nil {
+		return err
+	}
+	if single == nil {
+		return nil
+	}
+	switch v := single.(type) {
+	case string:
+		*s = StringOrArray([]string{v})
+		return nil
+	default:
+		return fmt.Errorf("only string or array is allowed, not %T", single)
+	}
+}
+
+// MarshalJSON converts this string or array to a JSON array or JSON string
+func (s StringOrArray) MarshalJSON() ([]byte, error) {
+	if len(s) == 1 {
+		return json.Marshal([]string(s)[0])
+	}
+	return json.Marshal([]string(s))
+}
+
+// SchemaOrArray represents a value that can either be a Schema
+// or an array of Schema. Mainly here for serialization purposes
+type SchemaOrArray struct {
+	Schema  *Schema
+	Schemas []Schema
+}
+
+// Len returns the number of schemas in this property
+func (s SchemaOrArray) Len() int {
+	if s.Schema != nil {
+		return 1
+	}
+	return len(s.Schemas)
+}
+
+// ContainsType returns true when one of the schemas is of the specified type
+func (s *SchemaOrArray) ContainsType(name string) bool {
+	if s.Schema != nil {
+		return s.Schema.Type != nil && s.Schema.Type.Contains(name)
+	}
+	return false
+}
+
+// MarshalJSON converts this schema object or array into JSON structure
+func (s SchemaOrArray) MarshalJSON() ([]byte, error) {
+	if len(s.Schemas) > 0 {
+		return json.Marshal(s.Schemas)
+	}
+	return json.Marshal(s.Schema)
+}
+
+// UnmarshalJSON converts this schema object or array from a JSON structure
+func (s *SchemaOrArray) UnmarshalJSON(data []byte) error {
+	var nw SchemaOrArray
+	var first byte
+	if len(data) > 1 {
+		first = data[0]
+	}
+	if first == '{' {
+		var sch Schema
+		if err := json.Unmarshal(data, &sch); err != nil {
+			return err
+		}
+		nw.Schema = &sch
+	}
+	if first == '[' {
+		if err := json.Unmarshal(data, &nw.Schemas); err != nil {
+			return err
+		}
+	}
+	*s = nw
+	return nil
+}
+
+// vim:set ft=go noet sts=2 sw=2 ts=2:
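Swagger.UnmarshalJSON above splits a document between SwaggerProps and VendorExtensible, so x- extensions survive a round trip alongside the known top-level fields. A minimal sketch, assuming the usual import path; the document content is illustrative.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	doc := []byte(`{
		"swagger": "2.0",
		"info": {"title": "Example API", "version": "1.0.0"},
		"paths": {},
		"x-audience": "internal"
	}`)

	var sw spec.Swagger
	if err := json.Unmarshal(doc, &sw); err != nil { // uses Swagger.UnmarshalJSON above
		panic(err)
	}
	// Known fields land in SwaggerProps, x- keys in VendorExtensible.Extensions.
	fmt.Println(sw.Swagger, sw.Info != nil, sw.Extensions["x-audience"])
}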
diff --git a/vendor/github.com/go-openapi/spec/tag.go b/vendor/github.com/go-openapi/spec/tag.go
new file mode 100644
index 00000000..faa3d3de
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/tag.go
@@ -0,0 +1,75 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+	"encoding/json"
+
+	"github.com/go-openapi/jsonpointer"
+	"github.com/go-openapi/swag"
+)
+
+// TagProps describe a tag entry in the top level tags section of a swagger spec
+type TagProps struct {
+	Description  string                 `json:"description,omitempty"`
+	Name         string                 `json:"name,omitempty"`
+	ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"`
+}
+
+// NewTag creates a new tag
+func NewTag(name, description string, externalDocs *ExternalDocumentation) Tag {
+	return Tag{TagProps: TagProps{Description: description, Name: name, ExternalDocs: externalDocs}}
+}
+
+// Tag allows adding metadata to a single tag that is used by the
+// [Operation Object](http://goo.gl/8us55a#operationObject).
+// It is not mandatory to have a Tag Object per tag used there.
+//
+// For more information: http://goo.gl/8us55a#tagObject
+type Tag struct {
+	VendorExtensible
+	TagProps
+}
+
+// JSONLookup implements an interface to customize json pointer lookup
+func (t Tag) JSONLookup(token string) (interface{}, error) {
+	if ex, ok := t.Extensions[token]; ok {
+		return &ex, nil
+	}
+
+	r, _, err := jsonpointer.GetForToken(t.TagProps, token)
+	return r, err
+}
+
+// MarshalJSON marshals this to JSON
+func (t Tag) MarshalJSON() ([]byte, error) {
+	b1, err := json.Marshal(t.TagProps)
+	if err != nil {
+		return nil, err
+	}
+	b2, err := json.Marshal(t.VendorExtensible)
+	if err != nil {
+		return nil, err
+	}
+	return swag.ConcatJSON(b1, b2), nil
+}
+
+// UnmarshalJSON unmarshals this from JSON
+func (t *Tag) UnmarshalJSON(data []byte) error {
+	if err := json.Unmarshal(data, &t.TagProps); err != nil {
+		return err
+	}
+	return json.Unmarshal(data, &t.VendorExtensible)
+}
diff --git a/vendor/github.com/go-openapi/spec/unused.go b/vendor/github.com/go-openapi/spec/unused.go
new file mode 100644
index 00000000..aa12b56f
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/unused.go
@@ -0,0 +1,174 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+/*
+
+import (
+	"net/url"
+	"os"
+	"path"
+	"path/filepath"
+
+	"github.com/go-openapi/jsonpointer"
+)
+
+  // Some currently unused functions and definitions that
+  // used to be part of the expander.
+
+  // Moved here for the record and possible future reuse
+
+var (
+	idPtr, _  = jsonpointer.New("/id")
+	refPtr, _ = jsonpointer.New("/$ref")
+)
+
+func idFromNode(node interface{}) (*Ref, error) {
+	if idValue, _, err := idPtr.Get(node); err == nil {
+		if refStr, ok := idValue.(string); ok && refStr != "" {
+			idRef, err := NewRef(refStr)
+			if err != nil {
+				return nil, err
+			}
+			return &idRef, nil
+		}
+	}
+	return nil, nil
+}
+
+func nextRef(startingNode interface{}, startingRef *Ref, ptr *jsonpointer.Pointer) *Ref {
+	if startingRef == nil {
+		return nil
+	}
+
+	if ptr == nil {
+		return startingRef
+	}
+
+	ret := startingRef
+	var idRef *Ref
+	node := startingNode
+
+	for _, tok := range ptr.DecodedTokens() {
+		node, _, _ = jsonpointer.GetForToken(node, tok)
+		if node == nil {
+			break
+		}
+
+		idRef, _ = idFromNode(node)
+		if idRef != nil {
+			nw, err := ret.Inherits(*idRef)
+			if err != nil {
+				break
+			}
+			ret = nw
+		}
+
+		refRef, _, _ := refPtr.Get(node)
+		if refRef != nil {
+			var rf Ref
+			switch value := refRef.(type) {
+			case string:
+				rf, _ = NewRef(value)
+			}
+			nw, err := ret.Inherits(rf)
+			if err != nil {
+				break
+			}
+			nwURL := nw.GetURL()
+			if nwURL.Scheme == "file" || (nwURL.Scheme == "" && nwURL.Host == "") {
+				nwpt := filepath.ToSlash(nwURL.Path)
+				if filepath.IsAbs(nwpt) {
+					_, err := os.Stat(nwpt)
+					if err != nil {
+						nwURL.Path = filepath.Join(".", nwpt)
+					}
+				}
+			}
+
+			ret = nw
+		}
+
+	}
+
+	return ret
+}
+
+// basePathFromSchemaID returns a new basePath based on an existing basePath and a schema ID
+func basePathFromSchemaID(oldBasePath, id string) string {
+	u, err := url.Parse(oldBasePath)
+	if err != nil {
+		panic(err)
+	}
+	uid, err := url.Parse(id)
+	if err != nil {
+		panic(err)
+	}
+
+	if path.IsAbs(uid.Path) {
+		return id
+	}
+	u.Path = path.Join(path.Dir(u.Path), uid.Path)
+	return u.String()
+}
+*/
+
+// type ExtraSchemaProps map[string]interface{}
+
+// // JSONSchema represents a structure that is a json schema draft 04
+// type JSONSchema struct {
+// 	SchemaProps
+// 	ExtraSchemaProps
+// }
+
+// // MarshalJSON marshal this to JSON
+// func (s JSONSchema) MarshalJSON() ([]byte, error) {
+// 	b1, err := json.Marshal(s.SchemaProps)
+// 	if err != nil {
+// 		return nil, err
+// 	}
+// 	b2, err := s.Ref.MarshalJSON()
+// 	if err != nil {
+// 		return nil, err
+// 	}
+// 	b3, err := s.Schema.MarshalJSON()
+// 	if err != nil {
+// 		return nil, err
+// 	}
+// 	b4, err := json.Marshal(s.ExtraSchemaProps)
+// 	if err != nil {
+// 		return nil, err
+// 	}
+// 	return swag.ConcatJSON(b1, b2, b3, b4), nil
+// }
+
+// // UnmarshalJSON marshal this from JSON
+// func (s *JSONSchema) UnmarshalJSON(data []byte) error {
+// 	var sch JSONSchema
+// 	if err := json.Unmarshal(data, &sch.SchemaProps); err != nil {
+// 		return err
+// 	}
+// 	if err := json.Unmarshal(data, &sch.Ref); err != nil {
+// 		return err
+// 	}
+// 	if err := json.Unmarshal(data, &sch.Schema); err != nil {
+// 		return err
+// 	}
+// 	if err := json.Unmarshal(data, &sch.ExtraSchemaProps); err != nil {
+// 		return err
+// 	}
+// 	*s = sch
+// 	return nil
+// }
diff --git a/vendor/github.com/go-openapi/spec/xml_object.go b/vendor/github.com/go-openapi/spec/xml_object.go
new file mode 100644
index 00000000..945a4670
--- /dev/null
+++ b/vendor/github.com/go-openapi/spec/xml_object.go
@@ -0,0 +1,68 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+// XMLObject a metadata object that allows for more fine-tuned XML model definitions.
+//
+// For more information: http://goo.gl/8us55a#xmlObject
+type XMLObject struct {
+	Name      string `json:"name,omitempty"`
+	Namespace string `json:"namespace,omitempty"`
+	Prefix    string `json:"prefix,omitempty"`
+	Attribute bool   `json:"attribute,omitempty"`
+	Wrapped   bool   `json:"wrapped,omitempty"`
+}
+
+// WithName sets the xml name for the object
+func (x *XMLObject) WithName(name string) *XMLObject {
+	x.Name = name
+	return x
+}
+
+// WithNamespace sets the xml namespace for the object
+func (x *XMLObject) WithNamespace(namespace string) *XMLObject {
+	x.Namespace = namespace
+	return x
+}
+
+// WithPrefix sets the xml prefix for the object
+func (x *XMLObject) WithPrefix(prefix string) *XMLObject {
+	x.Prefix = prefix
+	return x
+}
+
+// AsAttribute flags this object as xml attribute
+func (x *XMLObject) AsAttribute() *XMLObject {
+	x.Attribute = true
+	return x
+}
+
+// AsElement flags this object as an xml node
+func (x *XMLObject) AsElement() *XMLObject {
+	x.Attribute = false
+	return x
+}
+
+// AsWrapped flags this object as wrapped; this is mostly useful for array types
+func (x *XMLObject) AsWrapped() *XMLObject {
+	x.Wrapped = true
+	return x
+}
+
+// AsUnwrapped flags this object as unwrapped
+func (x *XMLObject) AsUnwrapped() *XMLObject {
+	x.Wrapped = false
+	return x
+}
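The XMLObject builders above are plain field setters that chain on the same pointer, so an XML hint for an array element can be described in one expression. A minimal sketch with an illustrative element name.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// A wrapped array element named "animal".
	x := new(spec.XMLObject).WithName("animal").AsWrapped()

	b, _ := json.Marshal(x)
	fmt.Println(string(b)) // {"name":"animal","wrapped":true}
}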
diff --git a/vendor/github.com/go-openapi/swag/.editorconfig b/vendor/github.com/go-openapi/swag/.editorconfig
new file mode 100644
index 00000000..3152da69
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/.editorconfig
@@ -0,0 +1,26 @@
+# top-most EditorConfig file
+root = true
+
+# Unix-style newlines with a newline ending every file
+[*]
+end_of_line = lf
+insert_final_newline = true
+indent_style = space
+indent_size = 2
+trim_trailing_whitespace = true
+
+# Set default charset
+[*.{js,py,go,scala,rb,java,html,css,less,sass,md}]
+charset = utf-8
+
+# Tab indentation (no size specified)
+[*.go]
+indent_style = tab
+
+[*.md]
+trim_trailing_whitespace = false
+
+# Matches the exact files either package.json or .travis.yml
+[{package.json,.travis.yml}]
+indent_style = space
+indent_size = 2
diff --git a/vendor/github.com/go-openapi/swag/.gitignore b/vendor/github.com/go-openapi/swag/.gitignore
new file mode 100644
index 00000000..d69b53ac
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/.gitignore
@@ -0,0 +1,4 @@
+secrets.yml
+vendor
+Godeps
+.idea
diff --git a/vendor/github.com/go-openapi/swag/.golangci.yml b/vendor/github.com/go-openapi/swag/.golangci.yml
new file mode 100644
index 00000000..625c3d6a
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/.golangci.yml
@@ -0,0 +1,22 @@
+linters-settings:
+  govet:
+    check-shadowing: true
+  golint:
+    min-confidence: 0
+  gocyclo:
+    min-complexity: 25
+  maligned:
+    suggest-new: true
+  dupl:
+    threshold: 100
+  goconst:
+    min-len: 3
+    min-occurrences: 2
+
+linters:
+  enable-all: true
+  disable:
+    - maligned
+    - lll
+    - gochecknoinits
+    - gochecknoglobals
diff --git a/vendor/github.com/go-openapi/swag/.travis.yml b/vendor/github.com/go-openapi/swag/.travis.yml
new file mode 100644
index 00000000..aa26d876
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/.travis.yml
@@ -0,0 +1,15 @@
+after_success:
+- bash <(curl -s https://codecov.io/bash)
+go:
+- 1.11.x
+- 1.12.x
+install:
+- GO111MODULE=off go get -u gotest.tools/gotestsum
+env:
+- GO111MODULE=on
+language: go
+notifications:
+  slack:
+    secure: QUWvCkBBK09GF7YtEvHHVt70JOkdlNBG0nIKu/5qc4/nW5HP8I2w0SEf/XR2je0eED1Qe3L/AfMCWwrEj+IUZc3l4v+ju8X8R3Lomhme0Eb0jd1MTMCuPcBT47YCj0M7RON7vXtbFfm1hFJ/jLe5+9FXz0hpXsR24PJc5ZIi/ogNwkaPqG4BmndzecpSh0vc2FJPZUD9LT0I09REY/vXR0oQAalLkW0asGD5taHZTUZq/kBpsNxaAFrLM23i4mUcf33M5fjLpvx5LRICrX/57XpBrDh2TooBU6Qj3CgoY0uPRYUmSNxbVx1czNzl2JtEpb5yjoxfVPQeg0BvQM00G8LJINISR+ohrjhkZmAqchDupAX+yFrxTtORa78CtnIL6z/aTNlgwwVD8kvL/1pFA/JWYmKDmz93mV/+6wubGzNSQCstzjkFA4/iZEKewKUoRIAi/fxyscP6L/rCpmY/4llZZvrnyTqVbt6URWpopUpH4rwYqreXAtJxJsfBJIeSmUIiDIOMGkCTvyTEW3fWGmGoqWtSHLoaWDyAIGb7azb+KvfpWtEcoPFWfSWU+LGee0A/YsUhBl7ADB9A0CJEuR8q4BPpKpfLwPKSiKSAXL7zDkyjExyhtgqbSl2jS+rKIHOZNL8JkCcTP2MKMVd563C5rC5FMKqu3S9m2b6380E=
+script:
+- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./...
diff --git a/vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md
new file mode 100644
index 00000000..9322b065
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md
@@ -0,0 +1,74 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, gender identity and expression, level of experience,
+nationality, personal appearance, race, religion, or sexual identity and
+orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or
+advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic
+  address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+  professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct, or to ban temporarily or
+permanently any contributor for other behaviors that they deem inappropriate,
+threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an appointed
+representative at an online or offline event. Representation of a project may be
+further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the project team at ivan+abuse@flanders.co.nz. All
+complaints will be reviewed and investigated and will result in a response that
+is deemed necessary and appropriate to the circumstances. The project team is
+obligated to maintain confidentiality with regard to the reporter of an incident.
+Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
+available at [http://contributor-covenant.org/version/1/4][version]
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/
diff --git a/vendor/github.com/go-openapi/swag/LICENSE b/vendor/github.com/go-openapi/swag/LICENSE
new file mode 100644
index 00000000..d6456956
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/go-openapi/swag/README.md b/vendor/github.com/go-openapi/swag/README.md
new file mode 100644
index 00000000..eb60ae80
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/README.md
@@ -0,0 +1,22 @@
+# Swag [![Build Status](https://travis-ci.org/go-openapi/swag.svg?branch=master)](https://travis-ci.org/go-openapi/swag) [![codecov](https://codecov.io/gh/go-openapi/swag/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/swag) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
+
+[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/swag/master/LICENSE)
+[![GoDoc](https://godoc.org/github.com/go-openapi/swag?status.svg)](http://godoc.org/github.com/go-openapi/swag)
+[![GolangCI](https://golangci.com/badges/github.com/go-openapi/swag.svg)](https://golangci.com)
+[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/swag)](https://goreportcard.com/report/github.com/go-openapi/swag)
+
+Contains a bunch of helper functions for go-openapi and go-swagger projects.
+
+You may also use it standalone for your projects.
+
+* convert between value and pointers for builtin types
+* convert from string to builtin types (wraps strconv)
+* fast json concatenation
+* search in path
+* load from file or http
+* name mangling
+
+
+This repo has only a few dependencies outside of the standard library:
+
+* YAML utilities depend on gopkg.in/yaml.v2
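+
+A minimal usage sketch (illustrative only, not part of the upstream README; it assumes the package is imported as `swag`):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/go-openapi/swag"
+)
+
+func main() {
+	// pointer helpers for builtin types
+	name := swag.String("example")
+	fmt.Println(swag.StringValue(name)) // "example"
+
+	// string conversion helpers (wrap strconv)
+	ok, _ := swag.ConvertBool("yes")
+	fmt.Println(ok) // true
+
+	// fast json concatenation
+	merged := swag.ConcatJSON([]byte(`{"a":1}`), []byte(`{"b":2}`))
+	fmt.Println(string(merged)) // {"a":1,"b":2}
+}
+```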
diff --git a/vendor/github.com/go-openapi/swag/convert.go b/vendor/github.com/go-openapi/swag/convert.go
new file mode 100644
index 00000000..7da35c31
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/convert.go
@@ -0,0 +1,208 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package swag
+
+import (
+	"math"
+	"strconv"
+	"strings"
+)
+
+// same as ECMA Number.MAX_SAFE_INTEGER and Number.MIN_SAFE_INTEGER
+const (
+	maxJSONFloat         = float64(1<<53 - 1)  // 9007199254740991.0    2^53 - 1
+	minJSONFloat         = -float64(1<<53 - 1) // -9007199254740991.0  -(2^53 - 1)
+	epsilon      float64 = 1e-9
+)
+
+// IsFloat64AJSONInteger allows for integers in [-(2^53 - 1), 2^53 - 1] inclusive
+func IsFloat64AJSONInteger(f float64) bool {
+	if math.IsNaN(f) || math.IsInf(f, 0) || f < minJSONFloat || f > maxJSONFloat {
+		return false
+	}
+	fa := math.Abs(f)
+	g := float64(uint64(f))
+	ga := math.Abs(g)
+
+	diff := math.Abs(f - g)
+
+	// more info: https://floating-point-gui.de/errors/comparison/#look-out-for-edge-cases
+	switch {
+	case f == g: // best case
+		return true
+	case f == float64(int64(f)) || f == float64(uint64(f)): // optimistic case
+		return true
+	case f == 0 || g == 0 || diff < math.SmallestNonzeroFloat64: // very close to 0 values
+		return diff < (epsilon * math.SmallestNonzeroFloat64)
+	}
+	// check the relative error
+	return diff/math.Min(fa+ga, math.MaxFloat64) < epsilon
+}
+
+var evaluatesAsTrue map[string]struct{}
+
+func init() {
+	evaluatesAsTrue = map[string]struct{}{
+		"true":     {},
+		"1":        {},
+		"yes":      {},
+		"ok":       {},
+		"y":        {},
+		"on":       {},
+		"selected": {},
+		"checked":  {},
+		"t":        {},
+		"enabled":  {},
+	}
+}
+
+// ConvertBool turns a string into a boolean
+func ConvertBool(str string) (bool, error) {
+	_, ok := evaluatesAsTrue[strings.ToLower(str)]
+	return ok, nil
+}
+
+// ConvertFloat32 turns a string into a float32
+func ConvertFloat32(str string) (float32, error) {
+	f, err := strconv.ParseFloat(str, 32)
+	if err != nil {
+		return 0, err
+	}
+	return float32(f), nil
+}
+
+// ConvertFloat64 turns a string into a float64
+func ConvertFloat64(str string) (float64, error) {
+	return strconv.ParseFloat(str, 64)
+}
+
+// ConvertInt8 turns a string into an int8
+func ConvertInt8(str string) (int8, error) {
+	i, err := strconv.ParseInt(str, 10, 8)
+	if err != nil {
+		return 0, err
+	}
+	return int8(i), nil
+}
+
+// ConvertInt16 turns a string into an int16
+func ConvertInt16(str string) (int16, error) {
+	i, err := strconv.ParseInt(str, 10, 16)
+	if err != nil {
+		return 0, err
+	}
+	return int16(i), nil
+}
+
+// ConvertInt32 turns a string into an int32
+func ConvertInt32(str string) (int32, error) {
+	i, err := strconv.ParseInt(str, 10, 32)
+	if err != nil {
+		return 0, err
+	}
+	return int32(i), nil
+}
+
+// ConvertInt64 turns a string into an int64
+func ConvertInt64(str string) (int64, error) {
+	return strconv.ParseInt(str, 10, 64)
+}
+
+// ConvertUint8 turns a string into a uint8
+func ConvertUint8(str string) (uint8, error) {
+	i, err := strconv.ParseUint(str, 10, 8)
+	if err != nil {
+		return 0, err
+	}
+	return uint8(i), nil
+}
+
+// ConvertUint16 turns a string into a uint16
+func ConvertUint16(str string) (uint16, error) {
+	i, err := strconv.ParseUint(str, 10, 16)
+	if err != nil {
+		return 0, err
+	}
+	return uint16(i), nil
+}
+
+// ConvertUint32 turns a string into a uint32
+func ConvertUint32(str string) (uint32, error) {
+	i, err := strconv.ParseUint(str, 10, 32)
+	if err != nil {
+		return 0, err
+	}
+	return uint32(i), nil
+}
+
+// ConvertUint64 turns a string into a uint64
+func ConvertUint64(str string) (uint64, error) {
+	return strconv.ParseUint(str, 10, 64)
+}
+
+// FormatBool turns a boolean into a string
+func FormatBool(value bool) string {
+	return strconv.FormatBool(value)
+}
+
+// FormatFloat32 turns a float32 into a string
+func FormatFloat32(value float32) string {
+	return strconv.FormatFloat(float64(value), 'f', -1, 32)
+}
+
+// FormatFloat64 turns a float64 into a string
+func FormatFloat64(value float64) string {
+	return strconv.FormatFloat(value, 'f', -1, 64)
+}
+
+// FormatInt8 turns an int8 into a string
+func FormatInt8(value int8) string {
+	return strconv.FormatInt(int64(value), 10)
+}
+
+// FormatInt16 turns an int16 into a string
+func FormatInt16(value int16) string {
+	return strconv.FormatInt(int64(value), 10)
+}
+
+// FormatInt32 turns an int32 into a string
+func FormatInt32(value int32) string {
+	return strconv.Itoa(int(value))
+}
+
+// FormatInt64 turns an int64 into a string
+func FormatInt64(value int64) string {
+	return strconv.FormatInt(value, 10)
+}
+
+// FormatUint8 turns a uint8 into a string
+func FormatUint8(value uint8) string {
+	return strconv.FormatUint(uint64(value), 10)
+}
+
+// FormatUint16 turns a uint16 into a string
+func FormatUint16(value uint16) string {
+	return strconv.FormatUint(uint64(value), 10)
+}
+
+// FormatUint32 turns a uint32 into a string
+func FormatUint32(value uint32) string {
+	return strconv.FormatUint(uint64(value), 10)
+}
+
+// FormatUint64 turns a uint64 into a string
+func FormatUint64(value uint64) string {
+	return strconv.FormatUint(value, 10)
+}
diff --git a/vendor/github.com/go-openapi/swag/convert_types.go b/vendor/github.com/go-openapi/swag/convert_types.go
new file mode 100644
index 00000000..c95e4e78
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/convert_types.go
@@ -0,0 +1,595 @@
+package swag
+
+import "time"
+
+// This file was taken from the aws go sdk
+
+// String returns a pointer to the string value passed in.
+func String(v string) *string {
+	return &v
+}
+
+// StringValue returns the value of the string pointer passed in or
+// "" if the pointer is nil.
+func StringValue(v *string) string {
+	if v != nil {
+		return *v
+	}
+	return ""
+}
+
+// StringSlice converts a slice of string values into a slice of
+// string pointers
+func StringSlice(src []string) []*string {
+	dst := make([]*string, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// StringValueSlice converts a slice of string pointers into a slice of
+// string values
+func StringValueSlice(src []*string) []string {
+	dst := make([]string, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// StringMap converts a string map of string values into a string
+// map of string pointers
+func StringMap(src map[string]string) map[string]*string {
+	dst := make(map[string]*string)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// StringValueMap converts a string map of string pointers into a string
+// map of string values
+func StringValueMap(src map[string]*string) map[string]string {
+	dst := make(map[string]string)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Bool returns a pointer to the bool value passed in.
+func Bool(v bool) *bool {
+	return &v
+}
+
+// BoolValue returns the value of the bool pointer passed in or
+// false if the pointer is nil.
+func BoolValue(v *bool) bool {
+	if v != nil {
+		return *v
+	}
+	return false
+}
+
+// BoolSlice converts a slice of bool values into a slice of
+// bool pointers
+func BoolSlice(src []bool) []*bool {
+	dst := make([]*bool, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// BoolValueSlice converts a slice of bool pointers into a slice of
+// bool values
+func BoolValueSlice(src []*bool) []bool {
+	dst := make([]bool, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// BoolMap converts a string map of bool values into a string
+// map of bool pointers
+func BoolMap(src map[string]bool) map[string]*bool {
+	dst := make(map[string]*bool)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// BoolValueMap converts a string map of bool pointers into a string
+// map of bool values
+func BoolValueMap(src map[string]*bool) map[string]bool {
+	dst := make(map[string]bool)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Int returns a pointer to the int value passed in.
+func Int(v int) *int {
+	return &v
+}
+
+// IntValue returns the value of the int pointer passed in or
+// 0 if the pointer is nil.
+func IntValue(v *int) int {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// IntSlice converts a slice of int values into a slice of
+// int pointers
+func IntSlice(src []int) []*int {
+	dst := make([]*int, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// IntValueSlice converts a slice of int pointers into a slice of
+// int values
+func IntValueSlice(src []*int) []int {
+	dst := make([]int, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// IntMap converts a string map of int values into a string
+// map of int pointers
+func IntMap(src map[string]int) map[string]*int {
+	dst := make(map[string]*int)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// IntValueMap converts a string map of int pointers into a string
+// map of int values
+func IntValueMap(src map[string]*int) map[string]int {
+	dst := make(map[string]int)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Int32 returns a pointer to the int32 value passed in.
+func Int32(v int32) *int32 {
+	return &v
+}
+
+// Int32Value returns the value of the int32 pointer passed in or
+// 0 if the pointer is nil.
+func Int32Value(v *int32) int32 {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// Int32Slice converts a slice of int32 values into a slice of
+// int32 pointers
+func Int32Slice(src []int32) []*int32 {
+	dst := make([]*int32, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// Int32ValueSlice converts a slice of int32 pointers into a slice of
+// int32 values
+func Int32ValueSlice(src []*int32) []int32 {
+	dst := make([]int32, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// Int32Map converts a string map of int32 values into a string
+// map of int32 pointers
+func Int32Map(src map[string]int32) map[string]*int32 {
+	dst := make(map[string]*int32)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// Int32ValueMap converts a string map of int32 pointers into a string
+// map of int32 values
+func Int32ValueMap(src map[string]*int32) map[string]int32 {
+	dst := make(map[string]int32)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Int64 returns a pointer to the int64 value passed in.
+func Int64(v int64) *int64 {
+	return &v
+}
+
+// Int64Value returns the value of the int64 pointer passed in or
+// 0 if the pointer is nil.
+func Int64Value(v *int64) int64 {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// Int64Slice converts a slice of int64 values into a slice of
+// int64 pointers
+func Int64Slice(src []int64) []*int64 {
+	dst := make([]*int64, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// Int64ValueSlice converts a slice of int64 pointers into a slice of
+// int64 values
+func Int64ValueSlice(src []*int64) []int64 {
+	dst := make([]int64, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// Int64Map converts a string map of int64 values into a string
+// map of int64 pointers
+func Int64Map(src map[string]int64) map[string]*int64 {
+	dst := make(map[string]*int64)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// Int64ValueMap converts a string map of int64 pointers into a string
+// map of int64 values
+func Int64ValueMap(src map[string]*int64) map[string]int64 {
+	dst := make(map[string]int64)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Uint returns a pointer to the uint value passed in.
+func Uint(v uint) *uint {
+	return &v
+}
+
+// UintValue returns the value of the uint pointer passed in or
+// 0 if the pointer is nil.
+func UintValue(v *uint) uint {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// UintSlice converts a slice of uint values into a slice of
+// uint pointers
+func UintSlice(src []uint) []*uint {
+	dst := make([]*uint, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// UintValueSlice converts a slice of uint pointers into a slice of
+// uint values
+func UintValueSlice(src []*uint) []uint {
+	dst := make([]uint, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// UintMap converts a string map of uint values into a string
+// map of uint pointers
+func UintMap(src map[string]uint) map[string]*uint {
+	dst := make(map[string]*uint)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// UintValueMap converts a string map of uint pointers into a string
+// map of uint values
+func UintValueMap(src map[string]*uint) map[string]uint {
+	dst := make(map[string]uint)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Uint32 returns a pointer to the uint32 value passed in.
+func Uint32(v uint32) *uint32 {
+	return &v
+}
+
+// Uint32Value returns the value of the uint32 pointer passed in or
+// 0 if the pointer is nil.
+func Uint32Value(v *uint32) uint32 {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// Uint32Slice converts a slice of uint32 values into a slice of
+// uint32 pointers
+func Uint32Slice(src []uint32) []*uint32 {
+	dst := make([]*uint32, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// Uint32ValueSlice converts a slice of uint32 pointers into a slice of
+// uint32 values
+func Uint32ValueSlice(src []*uint32) []uint32 {
+	dst := make([]uint32, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// Uint32Map converts a string map of uint32 values into a string
+// map of uint32 pointers
+func Uint32Map(src map[string]uint32) map[string]*uint32 {
+	dst := make(map[string]*uint32)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// Uint32ValueMap converts a string map of uint32 pointers into a string
+// map of uint32 values
+func Uint32ValueMap(src map[string]*uint32) map[string]uint32 {
+	dst := make(map[string]uint32)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Uint64 returns a pointer to the uint64 value passed in.
+func Uint64(v uint64) *uint64 {
+	return &v
+}
+
+// Uint64Value returns the value of the uint64 pointer passed in or
+// 0 if the pointer is nil.
+func Uint64Value(v *uint64) uint64 {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// Uint64Slice converts a slice of uint64 values into a slice of
+// uint64 pointers
+func Uint64Slice(src []uint64) []*uint64 {
+	dst := make([]*uint64, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// Uint64ValueSlice converts a slice of uint64 pointers into a slice of
+// uint64 values
+func Uint64ValueSlice(src []*uint64) []uint64 {
+	dst := make([]uint64, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// Uint64Map converts a string map of uint64 values into a string
+// map of uint64 pointers
+func Uint64Map(src map[string]uint64) map[string]*uint64 {
+	dst := make(map[string]*uint64)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// Uint64ValueMap converts a string map of uint64 pointers into a string
+// map of uint64 values
+func Uint64ValueMap(src map[string]*uint64) map[string]uint64 {
+	dst := make(map[string]uint64)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Float64 returns a pointer to the float64 value passed in.
+func Float64(v float64) *float64 {
+	return &v
+}
+
+// Float64Value returns the value of the float64 pointer passed in or
+// 0 if the pointer is nil.
+func Float64Value(v *float64) float64 {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// Float64Slice converts a slice of float64 values into a slice of
+// float64 pointers
+func Float64Slice(src []float64) []*float64 {
+	dst := make([]*float64, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// Float64ValueSlice converts a slice of float64 pointers into a slice of
+// float64 values
+func Float64ValueSlice(src []*float64) []float64 {
+	dst := make([]float64, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// Float64Map converts a string map of float64 values into a string
+// map of float64 pointers
+func Float64Map(src map[string]float64) map[string]*float64 {
+	dst := make(map[string]*float64)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// Float64ValueMap converts a string map of float64 pointers into a string
+// map of float64 values
+func Float64ValueMap(src map[string]*float64) map[string]float64 {
+	dst := make(map[string]float64)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Time returns a pointer to the time.Time value passed in.
+func Time(v time.Time) *time.Time {
+	return &v
+}
+
+// TimeValue returns the value of the time.Time pointer passed in or
+// time.Time{} if the pointer is nil.
+func TimeValue(v *time.Time) time.Time {
+	if v != nil {
+		return *v
+	}
+	return time.Time{}
+}
+
+// TimeSlice converts a slice of time.Time values into a slice of
+// time.Time pointers
+func TimeSlice(src []time.Time) []*time.Time {
+	dst := make([]*time.Time, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// TimeValueSlice converts a slice of time.Time pointers into a slice of
+// time.Time values
+func TimeValueSlice(src []*time.Time) []time.Time {
+	dst := make([]time.Time, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// TimeMap converts a string map of time.Time values into a string
+// map of time.Time pointers
+func TimeMap(src map[string]time.Time) map[string]*time.Time {
+	dst := make(map[string]*time.Time)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// TimeValueMap converts a string map of time.Time pointers into a string
+// map of time.Time values
+func TimeValueMap(src map[string]*time.Time) map[string]time.Time {
+	dst := make(map[string]time.Time)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
diff --git a/vendor/github.com/go-openapi/swag/doc.go b/vendor/github.com/go-openapi/swag/doc.go
new file mode 100644
index 00000000..8d2c8c50
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/doc.go
@@ -0,0 +1,32 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package swag contains a bunch of helper functions for go-openapi and go-swagger projects.
+
+You may also use it standalone for your projects.
+
+  * convert between value and pointers for builtin types
+  * convert from string to builtin types (wraps strconv)
+  * fast json concatenation
+  * search in path
+  * load from file or http
+  * name mangling
+
+
+This repo has only a few dependencies outside of the standard library:
+
+  * YAML utilities depend on gopkg.in/yaml.v2
+*/
+package swag
diff --git a/vendor/github.com/go-openapi/swag/go.mod b/vendor/github.com/go-openapi/swag/go.mod
new file mode 100644
index 00000000..15bbb082
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/go.mod
@@ -0,0 +1,14 @@
+module github.com/go-openapi/swag
+
+require (
+	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/kr/pretty v0.1.0 // indirect
+	github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63
+	github.com/stretchr/testify v1.3.0
+	gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
+	gopkg.in/yaml.v2 v2.2.2
+)
+
+replace github.com/golang/lint => golang.org/x/lint v0.0.0-20190409202823-959b441ac422
+
+replace sourcegraph.com/sourcegraph/go-diff => github.com/sourcegraph/go-diff v0.5.1
diff --git a/vendor/github.com/go-openapi/swag/go.sum b/vendor/github.com/go-openapi/swag/go.sum
new file mode 100644
index 00000000..33469f54
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/go.sum
@@ -0,0 +1,20 @@
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4=
+github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/go-openapi/swag/json.go b/vendor/github.com/go-openapi/swag/json.go
new file mode 100644
index 00000000..edf93d84
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/json.go
@@ -0,0 +1,312 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package swag
+
+import (
+	"bytes"
+	"encoding/json"
+	"log"
+	"reflect"
+	"strings"
+	"sync"
+
+	"github.com/mailru/easyjson/jlexer"
+	"github.com/mailru/easyjson/jwriter"
+)
+
+// nullJSON represents a JSON object with null type
+var nullJSON = []byte("null")
+
+// DefaultJSONNameProvider the default cache for types
+var DefaultJSONNameProvider = NewNameProvider()
+
+const comma = byte(',')
+
+var closers map[byte]byte
+
+func init() {
+	closers = map[byte]byte{
+		'{': '}',
+		'[': ']',
+	}
+}
+
+type ejMarshaler interface {
+	MarshalEasyJSON(w *jwriter.Writer)
+}
+
+type ejUnmarshaler interface {
+	UnmarshalEasyJSON(w *jlexer.Lexer)
+}
+
+// WriteJSON writes json data, prefers finding an appropriate interface to short-circuit the marshaller
+// so it takes the fastest option available.
+func WriteJSON(data interface{}) ([]byte, error) {
+	if d, ok := data.(ejMarshaler); ok {
+		jw := new(jwriter.Writer)
+		d.MarshalEasyJSON(jw)
+		return jw.BuildBytes()
+	}
+	if d, ok := data.(json.Marshaler); ok {
+		return d.MarshalJSON()
+	}
+	return json.Marshal(data)
+}
+
+// ReadJSON reads json data, prefers finding an appropriate interface to short-circuit the unmarshaller
+// so it takes the fastest option available.
+func ReadJSON(data []byte, value interface{}) error {
+	trimmedData := bytes.Trim(data, "\x00")
+	if d, ok := value.(ejUnmarshaler); ok {
+		jl := &jlexer.Lexer{Data: trimmedData}
+		d.UnmarshalEasyJSON(jl)
+		return jl.Error()
+	}
+	if d, ok := value.(json.Unmarshaler); ok {
+		return d.UnmarshalJSON(trimmedData)
+	}
+	return json.Unmarshal(trimmedData, value)
+}
+
+// DynamicJSONToStruct converts an untyped json structure into a struct
+func DynamicJSONToStruct(data interface{}, target interface{}) error {
+	// TODO: convert straight to a json typed map  (mergo + iterate?)
+	b, err := WriteJSON(data)
+	if err != nil {
+		return err
+	}
+	return ReadJSON(b, target)
+}
+
+// ConcatJSON concatenates multiple json objects efficiently
+func ConcatJSON(blobs ...[]byte) []byte {
+	if len(blobs) == 0 {
+		return nil
+	}
+
+	last := len(blobs) - 1
+	for blobs[last] == nil || bytes.Equal(blobs[last], nullJSON) {
+		// strips trailing null objects
+		last--
+		if last < 0 {
+			// there was nothing but "null"s or nil...
+			return nil
+		}
+	}
+	if last == 0 {
+		return blobs[0]
+	}
+
+	var opening, closing byte
+	var idx, a int
+	buf := bytes.NewBuffer(nil)
+
+	for i, b := range blobs[:last+1] {
+		if b == nil || bytes.Equal(b, nullJSON) {
+			// a null object is in the list: skip it
+			continue
+		}
+		if len(b) > 0 && opening == 0 { // is this an array or an object?
+			opening, closing = b[0], closers[b[0]]
+		}
+
+		if opening != '{' && opening != '[' {
+			continue // don't know how to concatenate non container objects
+		}
+
+		if len(b) < 3 { // yep empty but also the last one, so closing this thing
+			if i == last && a > 0 {
+				if err := buf.WriteByte(closing); err != nil {
+					log.Println(err)
+				}
+			}
+			continue
+		}
+
+		idx = 0
+		if a > 0 { // we need to join with a comma for everything beyond the first non-empty item
+			if err := buf.WriteByte(comma); err != nil {
+				log.Println(err)
+			}
+			idx = 1 // this is not the first or the last so we want to drop the leading bracket
+		}
+
+		if i != last { // not the last one, strip brackets
+			if _, err := buf.Write(b[idx : len(b)-1]); err != nil {
+				log.Println(err)
+			}
+		} else { // last one, strip only the leading bracket
+			if _, err := buf.Write(b[idx:]); err != nil {
+				log.Println(err)
+			}
+		}
+		a++
+	}
+	// somehow it ended up being empty, so provide a default value
+	if buf.Len() == 0 {
+		if err := buf.WriteByte(opening); err != nil {
+			log.Println(err)
+		}
+		if err := buf.WriteByte(closing); err != nil {
+			log.Println(err)
+		}
+	}
+	return buf.Bytes()
+}
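+
+// Illustrative note (an editorial addition, not upstream documentation): ConcatJSON
+// joins already-marshalled object or array payloads without re-encoding them, e.g.
+//
+//	ConcatJSON([]byte(`{"a":1}`), []byte(`{"b":2}`)) // -> {"a":1,"b":2}
+//	ConcatJSON([]byte(`[1]`), []byte(`[2]`), nil)    // -> [1,2]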
+
+// ToDynamicJSON turns an object into a properly JSON typed structure
+func ToDynamicJSON(data interface{}) interface{} {
+	// TODO: convert straight to a json typed map (mergo + iterate?)
+	b, err := json.Marshal(data)
+	if err != nil {
+		log.Println(err)
+	}
+	var res interface{}
+	if err := json.Unmarshal(b, &res); err != nil {
+		log.Println(err)
+	}
+	return res
+}
+
+// FromDynamicJSON turns an object into a properly JSON typed structure
+func FromDynamicJSON(data, target interface{}) error {
+	b, err := json.Marshal(data)
+	if err != nil {
+		log.Println(err)
+	}
+	return json.Unmarshal(b, target)
+}
+
+// NameProvider represents an object capable of translating from go property names
+// to json property names.
+// This type is thread-safe.
+type NameProvider struct {
+	lock  *sync.Mutex
+	index map[reflect.Type]nameIndex
+}
+
+type nameIndex struct {
+	jsonNames map[string]string
+	goNames   map[string]string
+}
+
+// NewNameProvider creates a new name provider
+func NewNameProvider() *NameProvider {
+	return &NameProvider{
+		lock:  &sync.Mutex{},
+		index: make(map[reflect.Type]nameIndex),
+	}
+}
+
+func buildnameIndex(tpe reflect.Type, idx, reverseIdx map[string]string) {
+	for i := 0; i < tpe.NumField(); i++ {
+		targetDes := tpe.Field(i)
+
+		if targetDes.PkgPath != "" { // unexported
+			continue
+		}
+
+		if targetDes.Anonymous { // walk embedded structures tree down first
+			buildnameIndex(targetDes.Type, idx, reverseIdx)
+			continue
+		}
+
+		if tag := targetDes.Tag.Get("json"); tag != "" {
+
+			parts := strings.Split(tag, ",")
+			if len(parts) == 0 {
+				continue
+			}
+
+			nm := parts[0]
+			if nm == "-" {
+				continue
+			}
+			if nm == "" { // empty string means we want to use the Go name
+				nm = targetDes.Name
+			}
+
+			idx[nm] = targetDes.Name
+			reverseIdx[targetDes.Name] = nm
+		}
+	}
+}
+
+func newNameIndex(tpe reflect.Type) nameIndex {
+	var idx = make(map[string]string, tpe.NumField())
+	var reverseIdx = make(map[string]string, tpe.NumField())
+
+	buildnameIndex(tpe, idx, reverseIdx)
+	return nameIndex{jsonNames: idx, goNames: reverseIdx}
+}
+
+// GetJSONNames gets all the json property names for a type
+func (n *NameProvider) GetJSONNames(subject interface{}) []string {
+	n.lock.Lock()
+	defer n.lock.Unlock()
+	tpe := reflect.Indirect(reflect.ValueOf(subject)).Type()
+	names, ok := n.index[tpe]
+	if !ok {
+		names = n.makeNameIndex(tpe)
+	}
+
+	res := make([]string, 0, len(names.jsonNames))
+	for k := range names.jsonNames {
+		res = append(res, k)
+	}
+	return res
+}
+
+// GetJSONName gets the json name for a go property name
+func (n *NameProvider) GetJSONName(subject interface{}, name string) (string, bool) {
+	tpe := reflect.Indirect(reflect.ValueOf(subject)).Type()
+	return n.GetJSONNameForType(tpe, name)
+}
+
+// GetJSONNameForType gets the json name for a go property name on a given type
+func (n *NameProvider) GetJSONNameForType(tpe reflect.Type, name string) (string, bool) {
+	n.lock.Lock()
+	defer n.lock.Unlock()
+	names, ok := n.index[tpe]
+	if !ok {
+		names = n.makeNameIndex(tpe)
+	}
+	nme, ok := names.goNames[name]
+	return nme, ok
+}
+
+func (n *NameProvider) makeNameIndex(tpe reflect.Type) nameIndex {
+	names := newNameIndex(tpe)
+	n.index[tpe] = names
+	return names
+}
+
+// GetGoName gets the go name for a json property name
+func (n *NameProvider) GetGoName(subject interface{}, name string) (string, bool) {
+	tpe := reflect.Indirect(reflect.ValueOf(subject)).Type()
+	return n.GetGoNameForType(tpe, name)
+}
+
+// GetGoNameForType gets the go name for a given type for a json property name
+func (n *NameProvider) GetGoNameForType(tpe reflect.Type, name string) (string, bool) {
+	n.lock.Lock()
+	defer n.lock.Unlock()
+	names, ok := n.index[tpe]
+	if !ok {
+		names = n.makeNameIndex(tpe)
+	}
+	nme, ok := names.jsonNames[name]
+	return nme, ok
+}
diff --git a/vendor/github.com/go-openapi/swag/loading.go b/vendor/github.com/go-openapi/swag/loading.go
new file mode 100644
index 00000000..70f4fb36
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/loading.go
@@ -0,0 +1,80 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package swag
+
+import (
+	"fmt"
+	"io/ioutil"
+	"log"
+	"net/http"
+	"path/filepath"
+	"strings"
+	"time"
+)
+
+// LoadHTTPTimeout the default timeout for load requests
+var LoadHTTPTimeout = 30 * time.Second
+
+// LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the path passed in
+func LoadFromFileOrHTTP(path string) ([]byte, error) {
+	return LoadStrategy(path, ioutil.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(path)
+}
+
+// LoadFromFileOrHTTPWithTimeout loads the bytes from a file or a remote http server based on the path passed in.
+// The timeout arg allows for per-request overriding of the request timeout.
+func LoadFromFileOrHTTPWithTimeout(path string, timeout time.Duration) ([]byte, error) {
+	return LoadStrategy(path, ioutil.ReadFile, loadHTTPBytes(timeout))(path)
+}
+
+// LoadStrategy returns a loader function for a given path or uri
+func LoadStrategy(path string, local, remote func(string) ([]byte, error)) func(string) ([]byte, error) {
+	if strings.HasPrefix(path, "http") {
+		return remote
+	}
+	return func(pth string) ([]byte, error) {
+		upth, err := pathUnescape(pth)
+		if err != nil {
+			return nil, err
+		}
+		return local(filepath.FromSlash(upth))
+	}
+}
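+
+// Illustrative note (an editorial addition, not upstream documentation): the
+// strategy above picks the remote loader for any path starting with "http", so
+//
+//	b, err := LoadFromFileOrHTTP("https://example.com/swagger.json") // fetched over HTTP
+//	b, err := LoadFromFileOrHTTP("./specs/swagger.json")             // read from disk
+//
+// the URL and file path shown here are placeholders.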
+
+func loadHTTPBytes(timeout time.Duration) func(path string) ([]byte, error) {
+	return func(path string) ([]byte, error) {
+		client := &http.Client{Timeout: timeout}
+		req, err := http.NewRequest("GET", path, nil)
+		if err != nil {
+			return nil, err
+		}
+		resp, err := client.Do(req)
+		defer func() {
+			if resp != nil {
+				if e := resp.Body.Close(); e != nil {
+					log.Println(e)
+				}
+			}
+		}()
+		if err != nil {
+			return nil, err
+		}
+
+		if resp.StatusCode != http.StatusOK {
+			return nil, fmt.Errorf("could not access document at %q [%s] ", path, resp.Status)
+		}
+
+		return ioutil.ReadAll(resp.Body)
+	}
+}
diff --git a/vendor/github.com/go-openapi/swag/name_lexem.go b/vendor/github.com/go-openapi/swag/name_lexem.go
new file mode 100644
index 00000000..aa7f6a9b
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/name_lexem.go
@@ -0,0 +1,87 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package swag
+
+import "unicode"
+
+type (
+	nameLexem interface {
+		GetUnsafeGoName() string
+		GetOriginal() string
+		IsInitialism() bool
+	}
+
+	initialismNameLexem struct {
+		original          string
+		matchedInitialism string
+	}
+
+	casualNameLexem struct {
+		original string
+	}
+)
+
+func newInitialismNameLexem(original, matchedInitialism string) *initialismNameLexem {
+	return &initialismNameLexem{
+		original:          original,
+		matchedInitialism: matchedInitialism,
+	}
+}
+
+func newCasualNameLexem(original string) *casualNameLexem {
+	return &casualNameLexem{
+		original: original,
+	}
+}
+
+func (l *initialismNameLexem) GetUnsafeGoName() string {
+	return l.matchedInitialism
+}
+
+func (l *casualNameLexem) GetUnsafeGoName() string {
+	var first rune
+	var rest string
+	for i, orig := range l.original {
+		if i == 0 {
+			first = orig
+			continue
+		}
+		if i > 0 {
+			rest = l.original[i:]
+			break
+		}
+	}
+	if len(l.original) > 1 {
+		return string(unicode.ToUpper(first)) + lower(rest)
+	}
+
+	return l.original
+}
+
+func (l *initialismNameLexem) GetOriginal() string {
+	return l.original
+}
+
+func (l *casualNameLexem) GetOriginal() string {
+	return l.original
+}
+
+func (l *initialismNameLexem) IsInitialism() bool {
+	return true
+}
+
+func (l *casualNameLexem) IsInitialism() bool {
+	return false
+}
diff --git a/vendor/github.com/go-openapi/swag/net.go b/vendor/github.com/go-openapi/swag/net.go
new file mode 100644
index 00000000..821235f8
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/net.go
@@ -0,0 +1,38 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package swag
+
+import (
+	"net"
+	"strconv"
+)
+
+// SplitHostPort splits a network address into a host and a port.
+// The port is -1 when there is no port to be found
+func SplitHostPort(addr string) (host string, port int, err error) {
+	h, p, err := net.SplitHostPort(addr)
+	if err != nil {
+		return "", -1, err
+	}
+	if p == "" {
+		return "", -1, &net.AddrError{Err: "missing port in address", Addr: addr}
+	}
+
+	pi, err := strconv.Atoi(p)
+	if err != nil {
+		return "", -1, err
+	}
+	return h, pi, nil
+}
diff --git a/vendor/github.com/go-openapi/swag/path.go b/vendor/github.com/go-openapi/swag/path.go
new file mode 100644
index 00000000..941bd017
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/path.go
@@ -0,0 +1,59 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package swag
+
+import (
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+)
+
+const (
+	// GOPATHKey represents the env key for gopath
+	GOPATHKey = "GOPATH"
+)
+
+// FindInSearchPath finds a package in a provided list of paths
+func FindInSearchPath(searchPath, pkg string) string {
+	pathsList := filepath.SplitList(searchPath)
+	for _, path := range pathsList {
+		if evaluatedPath, err := filepath.EvalSymlinks(filepath.Join(path, "src", pkg)); err == nil {
+			if _, err := os.Stat(evaluatedPath); err == nil {
+				return evaluatedPath
+			}
+		}
+	}
+	return ""
+}
+
+// FindInGoSearchPath finds a package in the $GOPATH:$GOROOT
+func FindInGoSearchPath(pkg string) string {
+	return FindInSearchPath(FullGoSearchPath(), pkg)
+}
+
+// FullGoSearchPath gets the search paths for finding packages
+func FullGoSearchPath() string {
+	allPaths := os.Getenv(GOPATHKey)
+	if allPaths == "" {
+		allPaths = filepath.Join(os.Getenv("HOME"), "go")
+	}
+	if allPaths != "" {
+		allPaths = strings.Join([]string{allPaths, runtime.GOROOT()}, ":")
+	} else {
+		allPaths = runtime.GOROOT()
+	}
+	return allPaths
+}
diff --git a/vendor/github.com/go-openapi/swag/post_go18.go b/vendor/github.com/go-openapi/swag/post_go18.go
new file mode 100644
index 00000000..c2e686d3
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/post_go18.go
@@ -0,0 +1,23 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.8
+
+package swag
+
+import "net/url"
+
+func pathUnescape(path string) (string, error) {
+	return url.PathUnescape(path)
+}
diff --git a/vendor/github.com/go-openapi/swag/post_go19.go b/vendor/github.com/go-openapi/swag/post_go19.go
new file mode 100644
index 00000000..eb2f2d8b
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/post_go19.go
@@ -0,0 +1,67 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.9
+
+package swag
+
+import (
+	"sort"
+	"sync"
+)
+
+// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms.
+// Since go1.9, this may be implemented with sync.Map.
+type indexOfInitialisms struct {
+	sortMutex *sync.Mutex
+	index     *sync.Map
+}
+
+func newIndexOfInitialisms() *indexOfInitialisms {
+	return &indexOfInitialisms{
+		sortMutex: new(sync.Mutex),
+		index:     new(sync.Map),
+	}
+}
+
+func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms {
+	m.sortMutex.Lock()
+	defer m.sortMutex.Unlock()
+	for k, v := range initial {
+		m.index.Store(k, v)
+	}
+	return m
+}
+
+func (m *indexOfInitialisms) isInitialism(key string) bool {
+	_, ok := m.index.Load(key)
+	return ok
+}
+
+func (m *indexOfInitialisms) add(key string) *indexOfInitialisms {
+	m.index.Store(key, true)
+	return m
+}
+
+func (m *indexOfInitialisms) sorted() (result []string) {
+	m.sortMutex.Lock()
+	defer m.sortMutex.Unlock()
+	m.index.Range(func(key, value interface{}) bool {
+		k := key.(string)
+		result = append(result, k)
+		return true
+	})
+	sort.Sort(sort.Reverse(byInitialism(result)))
+	return
+}
diff --git a/vendor/github.com/go-openapi/swag/pre_go18.go b/vendor/github.com/go-openapi/swag/pre_go18.go
new file mode 100644
index 00000000..6607f339
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/pre_go18.go
@@ -0,0 +1,23 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !go1.8
+
+package swag
+
+import "net/url"
+
+func pathUnescape(path string) (string, error) {
+	return url.QueryUnescape(path)
+}
diff --git a/vendor/github.com/go-openapi/swag/pre_go19.go b/vendor/github.com/go-openapi/swag/pre_go19.go
new file mode 100644
index 00000000..4bae187d
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/pre_go19.go
@@ -0,0 +1,69 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !go1.9
+
+package swag
+
+import (
+	"sort"
+	"sync"
+)
+
+// indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms.
+// Before go1.9, this may be implemented with a mutex on the map.
+type indexOfInitialisms struct {
+	getMutex *sync.Mutex
+	index    map[string]bool
+}
+
+func newIndexOfInitialisms() *indexOfInitialisms {
+	return &indexOfInitialisms{
+		getMutex: new(sync.Mutex),
+		index:    make(map[string]bool, 50),
+	}
+}
+
+func (m *indexOfInitialisms) load(initial map[string]bool) *indexOfInitialisms {
+	m.getMutex.Lock()
+	defer m.getMutex.Unlock()
+	for k, v := range initial {
+		m.index[k] = v
+	}
+	return m
+}
+
+func (m *indexOfInitialisms) isInitialism(key string) bool {
+	m.getMutex.Lock()
+	defer m.getMutex.Unlock()
+	_, ok := m.index[key]
+	return ok
+}
+
+func (m *indexOfInitialisms) add(key string) *indexOfInitialisms {
+	m.getMutex.Lock()
+	defer m.getMutex.Unlock()
+	m.index[key] = true
+	return m
+}
+
+func (m *indexOfInitialisms) sorted() (result []string) {
+	m.getMutex.Lock()
+	defer m.getMutex.Unlock()
+	for k := range m.index {
+		result = append(result, k)
+	}
+	sort.Sort(sort.Reverse(byInitialism(result)))
+	return
+}
diff --git a/vendor/github.com/go-openapi/swag/split.go b/vendor/github.com/go-openapi/swag/split.go
new file mode 100644
index 00000000..a1825fb7
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/split.go
@@ -0,0 +1,262 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package swag
+
+import (
+	"unicode"
+)
+
+var nameReplaceTable = map[rune]string{
+	'@': "At ",
+	'&': "And ",
+	'|': "Pipe ",
+	'$': "Dollar ",
+	'!': "Bang ",
+	'-': "",
+	'_': "",
+}
+
+type (
+	splitter struct {
+		postSplitInitialismCheck bool
+		initialisms              []string
+	}
+
+	splitterOption func(*splitter) *splitter
+)
+
+// split calls the splitter; the splitter itself provides more control and post-split options
+func split(str string) []string {
+	lexems := newSplitter().split(str)
+	result := make([]string, 0, len(lexems))
+
+	for _, lexem := range lexems {
+		result = append(result, lexem.GetOriginal())
+	}
+
+	return result
+
+}
+
+func (s *splitter) split(str string) []nameLexem {
+	return s.toNameLexems(str)
+}
+
+func newSplitter(options ...splitterOption) *splitter {
+	splitter := &splitter{
+		postSplitInitialismCheck: false,
+		initialisms:              initialisms,
+	}
+
+	for _, option := range options {
+		splitter = option(splitter)
+	}
+
+	return splitter
+}
+
+// withPostSplitInitialismCheck allows catching initialisms after the main split process
+func withPostSplitInitialismCheck(s *splitter) *splitter {
+	s.postSplitInitialismCheck = true
+	return s
+}
+
+type (
+	initialismMatch struct {
+		start, end int
+		body       []rune
+		complete   bool
+	}
+	initialismMatches []*initialismMatch
+)
+
+func (s *splitter) toNameLexems(name string) []nameLexem {
+	nameRunes := []rune(name)
+	matches := s.gatherInitialismMatches(nameRunes)
+	return s.mapMatchesToNameLexems(nameRunes, matches)
+}
+
+func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches {
+	matches := make(initialismMatches, 0)
+
+	for currentRunePosition, currentRune := range nameRunes {
+		newMatches := make(initialismMatches, 0, len(matches))
+
+		// check current initialism matches
+		for _, match := range matches {
+			if keepCompleteMatch := match.complete; keepCompleteMatch {
+				newMatches = append(newMatches, match)
+				continue
+			}
+
+			// drop failed match
+			currentMatchRune := match.body[currentRunePosition-match.start]
+			if !s.initialismRuneEqual(currentMatchRune, currentRune) {
+				continue
+			}
+
+			// try to complete ongoing match
+			if currentRunePosition-match.start == len(match.body)-1 {
+				// we are close; check the symbol ahead:
+				// if it is a lowercase letter, this is not the end of the match
+				// but the beginning of the next word
+
+				if currentRunePosition < len(nameRunes)-1 {
+					nextRune := nameRunes[currentRunePosition+1]
+					if newWord := unicode.IsLower(nextRune); newWord {
+						// oh ok, it was the start of a new word
+						continue
+					}
+				}
+
+				match.complete = true
+				match.end = currentRunePosition
+			}
+
+			newMatches = append(newMatches, match)
+		}
+
+		// check for new initialism matches
+		for _, initialism := range s.initialisms {
+			initialismRunes := []rune(initialism)
+			if s.initialismRuneEqual(initialismRunes[0], currentRune) {
+				newMatches = append(newMatches, &initialismMatch{
+					start:    currentRunePosition,
+					body:     initialismRunes,
+					complete: false,
+				})
+			}
+		}
+
+		matches = newMatches
+	}
+
+	return matches
+}
+
+func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMatches) []nameLexem {
+	nameLexems := make([]nameLexem, 0)
+
+	var lastAcceptedMatch *initialismMatch
+	for _, match := range matches {
+		if !match.complete {
+			continue
+		}
+
+		if firstMatch := lastAcceptedMatch == nil; firstMatch {
+			nameLexems = append(nameLexems, s.breakCasualString(nameRunes[:match.start])...)
+			nameLexems = append(nameLexems, s.breakInitialism(string(match.body)))
+
+			lastAcceptedMatch = match
+
+			continue
+		}
+
+		if overlappedMatch := match.start <= lastAcceptedMatch.end; overlappedMatch {
+			continue
+		}
+
+		middle := nameRunes[lastAcceptedMatch.end+1 : match.start]
+		nameLexems = append(nameLexems, s.breakCasualString(middle)...)
+		nameLexems = append(nameLexems, s.breakInitialism(string(match.body)))
+
+		lastAcceptedMatch = match
+	}
+
+	// we have not found any accepted matches
+	if lastAcceptedMatch == nil {
+		return s.breakCasualString(nameRunes)
+	}
+
+	if lastAcceptedMatch.end+1 != len(nameRunes) {
+		rest := nameRunes[lastAcceptedMatch.end+1:]
+		nameLexems = append(nameLexems, s.breakCasualString(rest)...)
+	}
+
+	return nameLexems
+}
+
+func (s *splitter) initialismRuneEqual(a, b rune) bool {
+	return a == b
+}
+
+func (s *splitter) breakInitialism(original string) nameLexem {
+	return newInitialismNameLexem(original, original)
+}
+
+func (s *splitter) breakCasualString(str []rune) []nameLexem {
+	segments := make([]nameLexem, 0)
+	currentSegment := ""
+
+	addCasualNameLexem := func(original string) {
+		segments = append(segments, newCasualNameLexem(original))
+	}
+
+	addInitialismNameLexem := func(original, match string) {
+		segments = append(segments, newInitialismNameLexem(original, match))
+	}
+
+	addNameLexem := func(original string) {
+		if s.postSplitInitialismCheck {
+			for _, initialism := range s.initialisms {
+				if upper(initialism) == upper(original) {
+					addInitialismNameLexem(original, initialism)
+					return
+				}
+			}
+		}
+
+		addCasualNameLexem(original)
+	}
+
+	for _, rn := range string(str) {
+		if replace, found := nameReplaceTable[rn]; found {
+			if currentSegment != "" {
+				addNameLexem(currentSegment)
+				currentSegment = ""
+			}
+
+			if replace != "" {
+				addNameLexem(replace)
+			}
+
+			continue
+		}
+
+		if !unicode.In(rn, unicode.L, unicode.M, unicode.N, unicode.Pc) {
+			if currentSegment != "" {
+				addNameLexem(currentSegment)
+				currentSegment = ""
+			}
+
+			continue
+		}
+
+		if unicode.IsUpper(rn) {
+			if currentSegment != "" {
+				addNameLexem(currentSegment)
+			}
+			currentSegment = ""
+		}
+
+		currentSegment += string(rn)
+	}
+
+	if currentSegment != "" {
+		addNameLexem(currentSegment)
+	}
+
+	return segments
+}
diff --git a/vendor/github.com/go-openapi/swag/util.go b/vendor/github.com/go-openapi/swag/util.go
new file mode 100644
index 00000000..9eac16af
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/util.go
@@ -0,0 +1,385 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package swag
+
+import (
+	"reflect"
+	"strings"
+	"unicode"
+)
+
+// commonInitialisms are common acronyms that are kept as whole uppercased words.
+var commonInitialisms *indexOfInitialisms
+
+// initialisms is a slice of sorted initialisms
+var initialisms []string
+
+var isInitialism func(string) bool
+
+// GoNamePrefixFunc sets an optional rule to prefix go names
+// which do not start with a letter.
+//
+// e.g. to help convert "123" into "{prefix}123"
+//
+// The default is to prefix with "X"
+var GoNamePrefixFunc func(string) string
+
+func init() {
+	// Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769
+	var configuredInitialisms = map[string]bool{
+		"ACL":   true,
+		"API":   true,
+		"ASCII": true,
+		"CPU":   true,
+		"CSS":   true,
+		"DNS":   true,
+		"EOF":   true,
+		"GUID":  true,
+		"HTML":  true,
+		"HTTPS": true,
+		"HTTP":  true,
+		"ID":    true,
+		"IP":    true,
+		"IPv4":  true,
+		"IPv6":  true,
+		"JSON":  true,
+		"LHS":   true,
+		"OAI":   true,
+		"QPS":   true,
+		"RAM":   true,
+		"RHS":   true,
+		"RPC":   true,
+		"SLA":   true,
+		"SMTP":  true,
+		"SQL":   true,
+		"SSH":   true,
+		"TCP":   true,
+		"TLS":   true,
+		"TTL":   true,
+		"UDP":   true,
+		"UI":    true,
+		"UID":   true,
+		"UUID":  true,
+		"URI":   true,
+		"URL":   true,
+		"UTF8":  true,
+		"VM":    true,
+		"XML":   true,
+		"XMPP":  true,
+		"XSRF":  true,
+		"XSS":   true,
+	}
+
+	// a thread-safe index of initialisms
+	commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms)
+	initialisms = commonInitialisms.sorted()
+
+	// a test function
+	isInitialism = commonInitialisms.isInitialism
+}
+
+const (
+	//collectionFormatComma = "csv"
+	collectionFormatSpace = "ssv"
+	collectionFormatTab   = "tsv"
+	collectionFormatPipe  = "pipes"
+	collectionFormatMulti = "multi"
+)
+
+// JoinByFormat joins a string array by a known format (e.g. swagger's collectionFormat attribute):
+//		ssv: space separated value
+//		tsv: tab separated value
+//		pipes: pipe (|) separated value
+//		csv: comma separated value (default)
+func JoinByFormat(data []string, format string) []string {
+	if len(data) == 0 {
+		return data
+	}
+	var sep string
+	switch format {
+	case collectionFormatSpace:
+		sep = " "
+	case collectionFormatTab:
+		sep = "\t"
+	case collectionFormatPipe:
+		sep = "|"
+	case collectionFormatMulti:
+		return data
+	default:
+		sep = ","
+	}
+	return []string{strings.Join(data, sep)}
+}
+
+// SplitByFormat splits a string by a known format:
+//		ssv: space separated value
+//		tsv: tab separated value
+//		pipes: pipe (|) separated value
+//		csv: comma separated value (default)
+//
+func SplitByFormat(data, format string) []string {
+	if data == "" {
+		return nil
+	}
+	var sep string
+	switch format {
+	case collectionFormatSpace:
+		sep = " "
+	case collectionFormatTab:
+		sep = "\t"
+	case collectionFormatPipe:
+		sep = "|"
+	case collectionFormatMulti:
+		return nil
+	default:
+		sep = ","
+	}
+	var result []string
+	for _, s := range strings.Split(data, sep) {
+		if ts := strings.TrimSpace(s); ts != "" {
+			result = append(result, ts)
+		}
+	}
+	return result
+}
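+
+// exampleJoinSplitByFormat is an editorial sketch, not part of the upstream
+// library: it shows JoinByFormat and SplitByFormat round-tripping a
+// collection using swagger's "pipes" collectionFormat.
+func exampleJoinSplitByFormat() ([]string, []string) {
+	joined := JoinByFormat([]string{"a", "b", "c"}, "pipes") // []string{"a|b|c"}
+	parts := SplitByFormat(joined[0], "pipes")               // []string{"a", "b", "c"}
+	return joined, parts
+}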
+
+type byInitialism []string
+
+func (s byInitialism) Len() int {
+	return len(s)
+}
+func (s byInitialism) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+func (s byInitialism) Less(i, j int) bool {
+	if len(s[i]) != len(s[j]) {
+		return len(s[i]) < len(s[j])
+	}
+
+	return strings.Compare(s[i], s[j]) > 0
+}
+
+// trim removes leading and trailing spaces
+func trim(str string) string {
+	return strings.Trim(str, " ")
+}
+
+// upper trims the string and converts it to upper case
+func upper(str string) string {
+	return strings.ToUpper(trim(str))
+}
+
+// lower trims the string and converts it to lower case
+func lower(str string) string {
+	return strings.ToLower(trim(str))
+}
+
+// Camelize an uppercased word
+func Camelize(word string) (camelized string) {
+	for pos, ru := range []rune(word) {
+		if pos > 0 {
+			camelized += string(unicode.ToLower(ru))
+		} else {
+			camelized += string(unicode.ToUpper(ru))
+		}
+	}
+	return
+}
+
+// ToFileName lowercases and underscores a go type name
+func ToFileName(name string) string {
+	in := split(name)
+	out := make([]string, 0, len(in))
+
+	for _, w := range in {
+		out = append(out, lower(w))
+	}
+
+	return strings.Join(out, "_")
+}
+
+// ToCommandName lowercases and underscores a go type name
+func ToCommandName(name string) string {
+	in := split(name)
+	out := make([]string, 0, len(in))
+
+	for _, w := range in {
+		out = append(out, lower(w))
+	}
+	return strings.Join(out, "-")
+}
+
+// ToHumanNameLower represents a code name as a human series of words
+func ToHumanNameLower(name string) string {
+	in := newSplitter(withPostSplitInitialismCheck).split(name)
+	out := make([]string, 0, len(in))
+
+	for _, w := range in {
+		if !w.IsInitialism() {
+			out = append(out, lower(w.GetOriginal()))
+		} else {
+			out = append(out, w.GetOriginal())
+		}
+	}
+
+	return strings.Join(out, " ")
+}
+
+// ToHumanNameTitle represents a code name as a human series of words with the first letters titleized
+func ToHumanNameTitle(name string) string {
+	in := newSplitter(withPostSplitInitialismCheck).split(name)
+
+	out := make([]string, 0, len(in))
+	for _, w := range in {
+		original := w.GetOriginal()
+		if !w.IsInitialism() {
+			out = append(out, Camelize(original))
+		} else {
+			out = append(out, original)
+		}
+	}
+	return strings.Join(out, " ")
+}
+
+// ToJSONName camelcases a name which can be underscored or pascal cased
+func ToJSONName(name string) string {
+	in := split(name)
+	out := make([]string, 0, len(in))
+
+	for i, w := range in {
+		if i == 0 {
+			out = append(out, lower(w))
+			continue
+		}
+		out = append(out, Camelize(w))
+	}
+	return strings.Join(out, "")
+}
+
+// ToVarName camelcases a name which can be underscored or pascal cased
+func ToVarName(name string) string {
+	res := ToGoName(name)
+	if isInitialism(res) {
+		return lower(res)
+	}
+	if len(res) <= 1 {
+		return lower(res)
+	}
+	return lower(res[:1]) + res[1:]
+}
+
+// ToGoName translates a swagger name which can be underscored or camel cased to a name that golint likes
+func ToGoName(name string) string {
+	lexems := newSplitter(withPostSplitInitialismCheck).split(name)
+
+	result := ""
+	for _, lexem := range lexems {
+		goName := lexem.GetUnsafeGoName()
+
+		// to support old behavior
+		if lexem.IsInitialism() {
+			goName = upper(goName)
+		}
+		result += goName
+	}
+
+	if len(result) > 0 {
+		// Only prefix with X when the first character isn't an ascii letter
+		first := []rune(result)[0]
+		if !unicode.IsLetter(first) || (first > unicode.MaxASCII && !unicode.IsUpper(first)) {
+			if GoNamePrefixFunc == nil {
+				return "X" + result
+			}
+			result = GoNamePrefixFunc(name) + result
+		}
+		first = []rune(result)[0]
+		if unicode.IsLetter(first) && !unicode.IsUpper(first) {
+			result = string(append([]rune{unicode.ToUpper(first)}, []rune(result)[1:]...))
+		}
+	}
+
+	return result
+}
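+
+// exampleNameConversions is an editorial sketch, not part of the upstream
+// library: it exercises the naming helpers above on one input; the expected
+// results are shown as comments.
+func exampleNameConversions() []string {
+	return []string{
+		ToGoName("sample_http_id"),   // expected "SampleHTTPID"
+		ToVarName("sample_http_id"),  // expected "sampleHTTPID"
+		ToFileName("SampleHTTPID"),   // expected "sample_http_id"
+		ToJSONName("sample_http_id"), // expected "sampleHttpId"
+	}
+}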
+
+// ContainsStrings searches a slice of strings for a case-sensitive match
+func ContainsStrings(coll []string, item string) bool {
+	for _, a := range coll {
+		if a == item {
+			return true
+		}
+	}
+	return false
+}
+
+// ContainsStringsCI searches a slice of strings for a case-insensitive match
+func ContainsStringsCI(coll []string, item string) bool {
+	for _, a := range coll {
+		if strings.EqualFold(a, item) {
+			return true
+		}
+	}
+	return false
+}
+
+type zeroable interface {
+	IsZero() bool
+}
+
+// IsZero returns true when the value passed into the function is a zero value.
+// This allows for safer checking of interface values.
+func IsZero(data interface{}) bool {
+	// check for things that have an IsZero method instead
+	if vv, ok := data.(zeroable); ok {
+		return vv.IsZero()
+	}
+	// continue with slightly more complex reflection
+	v := reflect.ValueOf(data)
+	switch v.Kind() {
+	case reflect.String:
+		return v.Len() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+		return v.IsNil()
+	case reflect.Struct, reflect.Array:
+		return reflect.DeepEqual(data, reflect.Zero(v.Type()).Interface())
+	case reflect.Invalid:
+		return true
+	}
+	return false
+}
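+
+// exampleIsZero is an editorial sketch, not part of the upstream library:
+// it shows IsZero on a few common kinds of values.
+func exampleIsZero() []bool {
+	var p *int
+	return []bool{
+		IsZero(""),            // true: empty string
+		IsZero(0),             // true: zero int
+		IsZero(p),             // true: nil pointer
+		IsZero([]string{"x"}), // false: non-empty slice
+	}
+}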
+
+// AddInitialisms add additional initialisms
+func AddInitialisms(words ...string) {
+	for _, word := range words {
+		//commonInitialisms[upper(word)] = true
+		commonInitialisms.add(upper(word))
+	}
+	// sort again
+	initialisms = commonInitialisms.sorted()
+}
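+
+// exampleAddInitialisms is an editorial sketch, not part of the upstream
+// library: after registering a custom initialism, ToGoName keeps it fully
+// upper-cased.
+func exampleAddInitialisms() string {
+	AddInitialisms("ELB")
+	return ToGoName("elb_name") // expected "ELBName"
+}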
+
+// CommandLineOptionsGroup represents a group of user-defined command line options
+type CommandLineOptionsGroup struct {
+	ShortDescription string
+	LongDescription  string
+	Options          interface{}
+}
diff --git a/vendor/github.com/go-openapi/swag/yaml.go b/vendor/github.com/go-openapi/swag/yaml.go
new file mode 100644
index 00000000..ec969144
--- /dev/null
+++ b/vendor/github.com/go-openapi/swag/yaml.go
@@ -0,0 +1,246 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package swag
+
+import (
+	"encoding/json"
+	"fmt"
+	"path/filepath"
+	"strconv"
+
+	"github.com/mailru/easyjson/jlexer"
+	"github.com/mailru/easyjson/jwriter"
+	yaml "gopkg.in/yaml.v2"
+)
+
+// YAMLMatcher matches yaml
+func YAMLMatcher(path string) bool {
+	ext := filepath.Ext(path)
+	return ext == ".yaml" || ext == ".yml"
+}
+
+// YAMLToJSON converts YAML unmarshaled data into json compatible data
+func YAMLToJSON(data interface{}) (json.RawMessage, error) {
+	jm, err := transformData(data)
+	if err != nil {
+		return nil, err
+	}
+	b, err := WriteJSON(jm)
+	return json.RawMessage(b), err
+}
+
+// BytesToYAMLDoc converts a byte slice into a YAML document
+func BytesToYAMLDoc(data []byte) (interface{}, error) {
+	var canary map[interface{}]interface{} // validate this is an object and not a different type
+	if err := yaml.Unmarshal(data, &canary); err != nil {
+		return nil, err
+	}
+
+	var document yaml.MapSlice // preserve order that is present in the document
+	if err := yaml.Unmarshal(data, &document); err != nil {
+		return nil, err
+	}
+	return document, nil
+}
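+
+// exampleYAMLBytesToJSON is an editorial sketch, not part of the upstream
+// library: it parses a YAML document while preserving key order, then turns
+// it into JSON with YAMLToJSON.
+func exampleYAMLBytesToJSON() (json.RawMessage, error) {
+	doc, err := BytesToYAMLDoc([]byte("name: fred\nage: 32\n"))
+	if err != nil {
+		return nil, err
+	}
+	return YAMLToJSON(doc) // expected {"name":"fred","age":32}
+}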
+
+// JSONMapSlice represent a JSON object, with the order of keys maintained
+type JSONMapSlice []JSONMapItem
+
+// MarshalJSON renders a JSONMapSlice as JSON
+func (s JSONMapSlice) MarshalJSON() ([]byte, error) {
+	w := &jwriter.Writer{Flags: jwriter.NilMapAsEmpty | jwriter.NilSliceAsEmpty}
+	s.MarshalEasyJSON(w)
+	return w.BuildBytes()
+}
+
+// MarshalEasyJSON renders a JSONMapSlice as JSON, using easyJSON
+func (s JSONMapSlice) MarshalEasyJSON(w *jwriter.Writer) {
+	w.RawByte('{')
+
+	ln := len(s)
+	last := ln - 1
+	for i := 0; i < ln; i++ {
+		s[i].MarshalEasyJSON(w)
+		if i != last { // not the last item: add a separating comma
+			w.RawByte(',')
+		}
+	}
+
+	w.RawByte('}')
+}
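+
+// exampleJSONMapSlice is an editorial sketch, not part of the upstream
+// library: marshaling a JSONMapSlice keeps keys in declaration order,
+// unlike a plain Go map.
+func exampleJSONMapSlice() ([]byte, error) {
+	s := JSONMapSlice{
+		{Key: "b", Value: 1},
+		{Key: "a", Value: 2},
+	}
+	return s.MarshalJSON() // expected {"b":1,"a":2}
+}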
+
+// UnmarshalJSON makes a JSONMapSlice from JSON
+func (s *JSONMapSlice) UnmarshalJSON(data []byte) error {
+	l := jlexer.Lexer{Data: data}
+	s.UnmarshalEasyJSON(&l)
+	return l.Error()
+}
+
+// UnmarshalEasyJSON makes a JSONMapSlice from JSON, using easyJSON
+func (s *JSONMapSlice) UnmarshalEasyJSON(in *jlexer.Lexer) {
+	if in.IsNull() {
+		in.Skip()
+		return
+	}
+
+	var result JSONMapSlice
+	in.Delim('{')
+	for !in.IsDelim('}') {
+		var mi JSONMapItem
+		mi.UnmarshalEasyJSON(in)
+		result = append(result, mi)
+	}
+	*s = result
+}
+
+// JSONMapItem represents the value of a key in a JSON object held by JSONMapSlice
+type JSONMapItem struct {
+	Key   string
+	Value interface{}
+}
+
+// MarshalJSON renders a JSONMapItem as JSON
+func (s JSONMapItem) MarshalJSON() ([]byte, error) {
+	w := &jwriter.Writer{Flags: jwriter.NilMapAsEmpty | jwriter.NilSliceAsEmpty}
+	s.MarshalEasyJSON(w)
+	return w.BuildBytes()
+}
+
+// MarshalEasyJSON renders a JSONMapItem as JSON, using easyJSON
+func (s JSONMapItem) MarshalEasyJSON(w *jwriter.Writer) {
+	w.String(s.Key)
+	w.RawByte(':')
+	w.Raw(WriteJSON(s.Value))
+}
+
+// UnmarshalJSON makes a JSONMapItem from JSON
+func (s *JSONMapItem) UnmarshalJSON(data []byte) error {
+	l := jlexer.Lexer{Data: data}
+	s.UnmarshalEasyJSON(&l)
+	return l.Error()
+}
+
+// UnmarshalEasyJSON makes a JSONMapItem from JSON, using easyJSON
+func (s *JSONMapItem) UnmarshalEasyJSON(in *jlexer.Lexer) {
+	key := in.UnsafeString()
+	in.WantColon()
+	value := in.Interface()
+	in.WantComma()
+	s.Key = key
+	s.Value = value
+}
+
+func transformData(input interface{}) (out interface{}, err error) {
+	format := func(t interface{}) (string, error) {
+		switch k := t.(type) {
+		case string:
+			return k, nil
+		case uint:
+			return strconv.FormatUint(uint64(k), 10), nil
+		case uint8:
+			return strconv.FormatUint(uint64(k), 10), nil
+		case uint16:
+			return strconv.FormatUint(uint64(k), 10), nil
+		case uint32:
+			return strconv.FormatUint(uint64(k), 10), nil
+		case uint64:
+			return strconv.FormatUint(k, 10), nil
+		case int:
+			return strconv.Itoa(k), nil
+		case int8:
+			return strconv.FormatInt(int64(k), 10), nil
+		case int16:
+			return strconv.FormatInt(int64(k), 10), nil
+		case int32:
+			return strconv.FormatInt(int64(k), 10), nil
+		case int64:
+			return strconv.FormatInt(k, 10), nil
+		default:
+			return "", fmt.Errorf("unexpected map key type, got: %T", k)
+		}
+	}
+
+	switch in := input.(type) {
+	case yaml.MapSlice:
+
+		o := make(JSONMapSlice, len(in))
+		for i, mi := range in {
+			var nmi JSONMapItem
+			if nmi.Key, err = format(mi.Key); err != nil {
+				return nil, err
+			}
+
+			v, ert := transformData(mi.Value)
+			if ert != nil {
+				return nil, ert
+			}
+			nmi.Value = v
+			o[i] = nmi
+		}
+		return o, nil
+	case map[interface{}]interface{}:
+		o := make(JSONMapSlice, 0, len(in))
+		for ke, va := range in {
+			var nmi JSONMapItem
+			if nmi.Key, err = format(ke); err != nil {
+				return nil, err
+			}
+
+			v, ert := transformData(va)
+			if ert != nil {
+				return nil, ert
+			}
+			nmi.Value = v
+			o = append(o, nmi)
+		}
+		return o, nil
+	case []interface{}:
+		len1 := len(in)
+		o := make([]interface{}, len1)
+		for i := 0; i < len1; i++ {
+			o[i], err = transformData(in[i])
+			if err != nil {
+				return nil, err
+			}
+		}
+		return o, nil
+	}
+	return input, nil
+}
+
+// YAMLDoc loads a yaml document from either http or a file and converts it to json
+func YAMLDoc(path string) (json.RawMessage, error) {
+	yamlDoc, err := YAMLData(path)
+	if err != nil {
+		return nil, err
+	}
+
+	data, err := YAMLToJSON(yamlDoc)
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+// YAMLData loads a yaml document from either http or a file
+func YAMLData(path string) (interface{}, error) {
+	data, err := LoadFromFileOrHTTP(path)
+	if err != nil {
+		return nil, err
+	}
+
+	return BytesToYAMLDoc(data)
+}
diff --git a/vendor/github.com/google/btree/.travis.yml b/vendor/github.com/google/btree/.travis.yml
new file mode 100644
index 00000000..4f2ee4d9
--- /dev/null
+++ b/vendor/github.com/google/btree/.travis.yml
@@ -0,0 +1 @@
+language: go
diff --git a/vendor/github.com/google/btree/LICENSE b/vendor/github.com/google/btree/LICENSE
new file mode 100644
index 00000000..d6456956
--- /dev/null
+++ b/vendor/github.com/google/btree/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/google/btree/README.md b/vendor/github.com/google/btree/README.md
new file mode 100644
index 00000000..6062a4da
--- /dev/null
+++ b/vendor/github.com/google/btree/README.md
@@ -0,0 +1,12 @@
+# BTree implementation for Go
+
+![Travis CI Build Status](https://api.travis-ci.org/google/btree.svg?branch=master)
+
+This package provides an in-memory B-Tree implementation for Go, useful as
+an ordered, mutable data structure.
+
+The API is based on the wonderful
+http://godoc.org/github.com/petar/GoLLRB/llrb, and is meant to allow btree to
+act as a drop-in replacement for gollrb trees.
+
+See http://godoc.org/github.com/google/btree for documentation.
diff --git a/vendor/github.com/google/btree/btree.go b/vendor/github.com/google/btree/btree.go
new file mode 100644
index 00000000..6ff062f9
--- /dev/null
+++ b/vendor/github.com/google/btree/btree.go
@@ -0,0 +1,890 @@
+// Copyright 2014 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package btree implements in-memory B-Trees of arbitrary degree.
+//
+// btree implements an in-memory B-Tree for use as an ordered data structure.
+// It is not meant for persistent storage solutions.
+//
+// It has a flatter structure than an equivalent red-black or other binary tree,
+// which in some cases yields better memory usage and/or performance.
+// See some discussion on the matter here:
+//   http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html
+// Note, though, that this project is in no way related to the C++ B-Tree
+// implementation written about there.
+//
+// Within this tree, each node contains a slice of items and a (possibly nil)
+// slice of children.  For basic numeric values or raw structs, this can cause
+// efficiency differences when compared to equivalent C++ template code that
+// stores values in arrays within the node:
+//   * Due to the overhead of storing values as interfaces (each
+//     value needs to be stored as the value itself, then 2 words for the
+//     interface pointing to that value and its type), resulting in higher
+//     memory use.
+//   * Since interfaces can point to values anywhere in memory, values are
+//     most likely not stored in contiguous blocks, resulting in a higher
+//     number of cache misses.
+// These issues don't tend to matter, though, when working with strings or other
+// heap-allocated structures, since C++-equivalent structures also must store
+// pointers and also distribute their values across the heap.
+//
+// This implementation is designed to be a drop-in replacement to gollrb.LLRB
+// trees, (http://github.com/petar/gollrb), an excellent and probably the most
+// widely used ordered tree implementation in the Go ecosystem currently.
+// Its functions, therefore, exactly mirror those of
+// llrb.LLRB where possible.  Unlike gollrb, though, we currently don't
+// support storing multiple equivalent values.
+package btree
+
+import (
+	"fmt"
+	"io"
+	"sort"
+	"strings"
+	"sync"
+)
+
+// Item represents a single object in the tree.
+type Item interface {
+	// Less tests whether the current item is less than the given argument.
+	//
+	// This must provide a strict weak ordering.
+	// If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only
+	// hold one of either a or b in the tree).
+	Less(than Item) bool
+}
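+
+// exampleInt is an editorial sketch, not part of the upstream library: a
+// minimal Item implementation ordering plain ints by their natural order.
+type exampleInt int
+
+// Less implements Item for exampleInt.
+func (a exampleInt) Less(b Item) bool { return a < b.(exampleInt) }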
+
+const (
+	DefaultFreeListSize = 32
+)
+
+var (
+	nilItems    = make(items, 16)
+	nilChildren = make(children, 16)
+)
+
+// FreeList represents a free list of btree nodes. By default each
+// BTree has its own FreeList, but multiple BTrees can share the same
+// FreeList.
+// Two Btrees using the same freelist are safe for concurrent write access.
+type FreeList struct {
+	mu       sync.Mutex
+	freelist []*node
+}
+
+// NewFreeList creates a new free list.
+// size is the maximum size of the returned free list.
+func NewFreeList(size int) *FreeList {
+	return &FreeList{freelist: make([]*node, 0, size)}
+}
+
+func (f *FreeList) newNode() (n *node) {
+	f.mu.Lock()
+	index := len(f.freelist) - 1
+	if index < 0 {
+		f.mu.Unlock()
+		return new(node)
+	}
+	n = f.freelist[index]
+	f.freelist[index] = nil
+	f.freelist = f.freelist[:index]
+	f.mu.Unlock()
+	return
+}
+
+// freeNode adds the given node to the list, returning true if it was added
+// and false if it was discarded.
+func (f *FreeList) freeNode(n *node) (out bool) {
+	f.mu.Lock()
+	if len(f.freelist) < cap(f.freelist) {
+		f.freelist = append(f.freelist, n)
+		out = true
+	}
+	f.mu.Unlock()
+	return
+}
+
+// ItemIterator allows callers of Ascend* to iterate in-order over portions of
+// the tree.  When this function returns false, iteration will stop and the
+// associated Ascend* function will immediately return.
+type ItemIterator func(i Item) bool
+
+// New creates a new B-Tree with the given degree.
+//
+// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items
+// and 2-4 children).
+func New(degree int) *BTree {
+	return NewWithFreeList(degree, NewFreeList(DefaultFreeListSize))
+}
+
+// NewWithFreeList creates a new B-Tree that uses the given node free list.
+func NewWithFreeList(degree int, f *FreeList) *BTree {
+	if degree <= 1 {
+		panic("bad degree")
+	}
+	return &BTree{
+		degree: degree,
+		cow:    &copyOnWriteContext{freelist: f},
+	}
+}
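+
+// exampleSharedFreeList is an editorial sketch, not part of the upstream
+// library: two trees sharing one FreeList, which the FreeList documentation
+// above states is safe for concurrent write access.
+func exampleSharedFreeList() (*BTree, *BTree) {
+	fl := NewFreeList(DefaultFreeListSize)
+	t1 := NewWithFreeList(4, fl)
+	t2 := NewWithFreeList(4, fl)
+	return t1, t2
+}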
+
+// items stores items in a node.
+type items []Item
+
+// insertAt inserts a value into the given index, pushing all subsequent values
+// forward.
+func (s *items) insertAt(index int, item Item) {
+	*s = append(*s, nil)
+	if index < len(*s) {
+		copy((*s)[index+1:], (*s)[index:])
+	}
+	(*s)[index] = item
+}
+
+// removeAt removes a value at a given index, pulling all subsequent values
+// back.
+func (s *items) removeAt(index int) Item {
+	item := (*s)[index]
+	copy((*s)[index:], (*s)[index+1:])
+	(*s)[len(*s)-1] = nil
+	*s = (*s)[:len(*s)-1]
+	return item
+}
+
+// pop removes and returns the last element in the list.
+func (s *items) pop() (out Item) {
+	index := len(*s) - 1
+	out = (*s)[index]
+	(*s)[index] = nil
+	*s = (*s)[:index]
+	return
+}
+
+// truncate truncates this instance at index so that it contains only the
+// first index items. index must be less than or equal to length.
+func (s *items) truncate(index int) {
+	var toClear items
+	*s, toClear = (*s)[:index], (*s)[index:]
+	for len(toClear) > 0 {
+		toClear = toClear[copy(toClear, nilItems):]
+	}
+}
+
+// find returns the index where the given item should be inserted into this
+// list.  'found' is true if the item already exists in the list at the given
+// index.
+func (s items) find(item Item) (index int, found bool) {
+	i := sort.Search(len(s), func(i int) bool {
+		return item.Less(s[i])
+	})
+	if i > 0 && !s[i-1].Less(item) {
+		return i - 1, true
+	}
+	return i, false
+}
+
+// children stores child nodes in a node.
+type children []*node
+
+// insertAt inserts a value into the given index, pushing all subsequent values
+// forward.
+func (s *children) insertAt(index int, n *node) {
+	*s = append(*s, nil)
+	if index < len(*s) {
+		copy((*s)[index+1:], (*s)[index:])
+	}
+	(*s)[index] = n
+}
+
+// removeAt removes a value at a given index, pulling all subsequent values
+// back.
+func (s *children) removeAt(index int) *node {
+	n := (*s)[index]
+	copy((*s)[index:], (*s)[index+1:])
+	(*s)[len(*s)-1] = nil
+	*s = (*s)[:len(*s)-1]
+	return n
+}
+
+// pop removes and returns the last element in the list.
+func (s *children) pop() (out *node) {
+	index := len(*s) - 1
+	out = (*s)[index]
+	(*s)[index] = nil
+	*s = (*s)[:index]
+	return
+}
+
+// truncate truncates this instance at index so that it contains only the
+// first index children. index must be less than or equal to length.
+func (s *children) truncate(index int) {
+	var toClear children
+	*s, toClear = (*s)[:index], (*s)[index:]
+	for len(toClear) > 0 {
+		toClear = toClear[copy(toClear, nilChildren):]
+	}
+}
+
+// node is an internal node in a tree.
+//
+// It must at all times maintain the invariant that either
+//   * len(children) == 0, len(items) unconstrained
+//   * len(children) == len(items) + 1
+type node struct {
+	items    items
+	children children
+	cow      *copyOnWriteContext
+}
+
+func (n *node) mutableFor(cow *copyOnWriteContext) *node {
+	if n.cow == cow {
+		return n
+	}
+	out := cow.newNode()
+	if cap(out.items) >= len(n.items) {
+		out.items = out.items[:len(n.items)]
+	} else {
+		out.items = make(items, len(n.items), cap(n.items))
+	}
+	copy(out.items, n.items)
+	// Copy children
+	if cap(out.children) >= len(n.children) {
+		out.children = out.children[:len(n.children)]
+	} else {
+		out.children = make(children, len(n.children), cap(n.children))
+	}
+	copy(out.children, n.children)
+	return out
+}
+
+func (n *node) mutableChild(i int) *node {
+	c := n.children[i].mutableFor(n.cow)
+	n.children[i] = c
+	return c
+}
+
+// split splits the given node at the given index.  The current node shrinks,
+// and this function returns the item that existed at that index and a new node
+// containing all items/children after it.
+func (n *node) split(i int) (Item, *node) {
+	item := n.items[i]
+	next := n.cow.newNode()
+	next.items = append(next.items, n.items[i+1:]...)
+	n.items.truncate(i)
+	if len(n.children) > 0 {
+		next.children = append(next.children, n.children[i+1:]...)
+		n.children.truncate(i + 1)
+	}
+	return item, next
+}
+
+// maybeSplitChild checks if a child should be split, and if so splits it.
+// Returns whether or not a split occurred.
+func (n *node) maybeSplitChild(i, maxItems int) bool {
+	if len(n.children[i].items) < maxItems {
+		return false
+	}
+	first := n.mutableChild(i)
+	item, second := first.split(maxItems / 2)
+	n.items.insertAt(i, item)
+	n.children.insertAt(i+1, second)
+	return true
+}
+
+// insert inserts an item into the subtree rooted at this node, making sure
+// no nodes in the subtree exceed maxItems items.  Should an equivalent item
+// be found/replaced by insert, it will be returned.
+func (n *node) insert(item Item, maxItems int) Item {
+	i, found := n.items.find(item)
+	if found {
+		out := n.items[i]
+		n.items[i] = item
+		return out
+	}
+	if len(n.children) == 0 {
+		n.items.insertAt(i, item)
+		return nil
+	}
+	if n.maybeSplitChild(i, maxItems) {
+		inTree := n.items[i]
+		switch {
+		case item.Less(inTree):
+			// no change, we want first split node
+		case inTree.Less(item):
+			i++ // we want second split node
+		default:
+			out := n.items[i]
+			n.items[i] = item
+			return out
+		}
+	}
+	return n.mutableChild(i).insert(item, maxItems)
+}
+
+// get finds the given key in the subtree and returns it.
+func (n *node) get(key Item) Item {
+	i, found := n.items.find(key)
+	if found {
+		return n.items[i]
+	} else if len(n.children) > 0 {
+		return n.children[i].get(key)
+	}
+	return nil
+}
+
+// min returns the first item in the subtree.
+func min(n *node) Item {
+	if n == nil {
+		return nil
+	}
+	for len(n.children) > 0 {
+		n = n.children[0]
+	}
+	if len(n.items) == 0 {
+		return nil
+	}
+	return n.items[0]
+}
+
+// max returns the last item in the subtree.
+func max(n *node) Item {
+	if n == nil {
+		return nil
+	}
+	for len(n.children) > 0 {
+		n = n.children[len(n.children)-1]
+	}
+	if len(n.items) == 0 {
+		return nil
+	}
+	return n.items[len(n.items)-1]
+}
+
+// toRemove details what item to remove in a node.remove call.
+type toRemove int
+
+const (
+	removeItem toRemove = iota // removes the given item
+	removeMin                  // removes smallest item in the subtree
+	removeMax                  // removes largest item in the subtree
+)
+
+// remove removes an item from the subtree rooted at this node.
+func (n *node) remove(item Item, minItems int, typ toRemove) Item {
+	var i int
+	var found bool
+	switch typ {
+	case removeMax:
+		if len(n.children) == 0 {
+			return n.items.pop()
+		}
+		i = len(n.items)
+	case removeMin:
+		if len(n.children) == 0 {
+			return n.items.removeAt(0)
+		}
+		i = 0
+	case removeItem:
+		i, found = n.items.find(item)
+		if len(n.children) == 0 {
+			if found {
+				return n.items.removeAt(i)
+			}
+			return nil
+		}
+	default:
+		panic("invalid type")
+	}
+	// If we get to here, we have children.
+	if len(n.children[i].items) <= minItems {
+		return n.growChildAndRemove(i, item, minItems, typ)
+	}
+	child := n.mutableChild(i)
+	// Either we had enough items to begin with, or we've done some
+	// merging/stealing, because we've got enough now and we're ready to return
+	// stuff.
+	if found {
+		// The item exists at index 'i', and the child we've selected can give us a
+		// predecessor, since if we've gotten here it's got > minItems items in it.
+		out := n.items[i]
+		// We use our special-case 'remove' call with typ=removeMax to pull the
+		// predecessor of item i (the rightmost leaf of our immediate left child)
+		// and set it into where we pulled the item from.
+		n.items[i] = child.remove(nil, minItems, removeMax)
+		return out
+	}
+	// Final recursive call.  Once we're here, we know that the item isn't in this
+	// node and that the child is big enough to remove from.
+	return child.remove(item, minItems, typ)
+}
+
+// growChildAndRemove grows child 'i' to make sure it's possible to remove an
+// item from it while keeping it at minItems, then calls remove to actually
+// remove it.
+//
+// Most documentation says we have to do two sets of special casing:
+//   1) item is in this node
+//   2) item is in child
+// In both cases, we need to handle the two subcases:
+//   A) node has enough values that it can spare one
+//   B) node doesn't have enough values
+// For the latter, we have to check:
+//   a) left sibling has node to spare
+//   b) right sibling has node to spare
+//   c) we must merge
+// To simplify our code here, we handle cases #1 and #2 the same:
+// If a node doesn't have enough items, we make sure it does (using a,b,c).
+// We then simply redo our remove call, and the second time (regardless of
+// whether we're in case 1 or 2), we'll have enough items and can guarantee
+// that we hit case A.
+func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) Item {
+	if i > 0 && len(n.children[i-1].items) > minItems {
+		// Steal from left child
+		child := n.mutableChild(i)
+		stealFrom := n.mutableChild(i - 1)
+		stolenItem := stealFrom.items.pop()
+		child.items.insertAt(0, n.items[i-1])
+		n.items[i-1] = stolenItem
+		if len(stealFrom.children) > 0 {
+			child.children.insertAt(0, stealFrom.children.pop())
+		}
+	} else if i < len(n.items) && len(n.children[i+1].items) > minItems {
+		// steal from right child
+		child := n.mutableChild(i)
+		stealFrom := n.mutableChild(i + 1)
+		stolenItem := stealFrom.items.removeAt(0)
+		child.items = append(child.items, n.items[i])
+		n.items[i] = stolenItem
+		if len(stealFrom.children) > 0 {
+			child.children = append(child.children, stealFrom.children.removeAt(0))
+		}
+	} else {
+		if i >= len(n.items) {
+			i--
+		}
+		child := n.mutableChild(i)
+		// merge with right child
+		mergeItem := n.items.removeAt(i)
+		mergeChild := n.children.removeAt(i + 1)
+		child.items = append(child.items, mergeItem)
+		child.items = append(child.items, mergeChild.items...)
+		child.children = append(child.children, mergeChild.children...)
+		n.cow.freeNode(mergeChild)
+	}
+	return n.remove(item, minItems, typ)
+}
+
+type direction int
+
+const (
+	descend = direction(-1)
+	ascend  = direction(+1)
+)
+
+// iterate provides a simple method for iterating over elements in the tree.
+//
+// When ascending, the 'start' should be less than 'stop' and when descending,
+// the 'start' should be greater than 'stop'. Setting 'includeStart' to true
+// will force the iterator to include the first item when it equals 'start',
+// thus creating "greaterOrEqual" or "lessThanEqual" queries rather than just
+// "greaterThan" or "lessThan" queries.
+func (n *node) iterate(dir direction, start, stop Item, includeStart bool, hit bool, iter ItemIterator) (bool, bool) {
+	var ok, found bool
+	var index int
+	switch dir {
+	case ascend:
+		if start != nil {
+			index, _ = n.items.find(start)
+		}
+		for i := index; i < len(n.items); i++ {
+			if len(n.children) > 0 {
+				if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok {
+					return hit, false
+				}
+			}
+			if !includeStart && !hit && start != nil && !start.Less(n.items[i]) {
+				hit = true
+				continue
+			}
+			hit = true
+			if stop != nil && !n.items[i].Less(stop) {
+				return hit, false
+			}
+			if !iter(n.items[i]) {
+				return hit, false
+			}
+		}
+		if len(n.children) > 0 {
+			if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter); !ok {
+				return hit, false
+			}
+		}
+	case descend:
+		if start != nil {
+			index, found = n.items.find(start)
+			if !found {
+				index = index - 1
+			}
+		} else {
+			index = len(n.items) - 1
+		}
+		for i := index; i >= 0; i-- {
+			if start != nil && !n.items[i].Less(start) {
+				if !includeStart || hit || start.Less(n.items[i]) {
+					continue
+				}
+			}
+			if len(n.children) > 0 {
+				if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter); !ok {
+					return hit, false
+				}
+			}
+			if stop != nil && !stop.Less(n.items[i]) {
+				return hit, false //	continue
+			}
+			hit = true
+			if !iter(n.items[i]) {
+				return hit, false
+			}
+		}
+		if len(n.children) > 0 {
+			if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter); !ok {
+				return hit, false
+			}
+		}
+	}
+	return hit, true
+}
+
+// Used for testing/debugging purposes.
+func (n *node) print(w io.Writer, level int) {
+	fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat("  ", level), n.items)
+	for _, c := range n.children {
+		c.print(w, level+1)
+	}
+}
+
+// BTree is an implementation of a B-Tree.
+//
+// BTree stores Item instances in an ordered structure, allowing easy insertion,
+// removal, and iteration.
+//
+// Write operations are not safe for concurrent mutation by multiple
+// goroutines, but Read operations are.
+type BTree struct {
+	degree int
+	length int
+	root   *node
+	cow    *copyOnWriteContext
+}
+
+// copyOnWriteContext pointers determine node ownership... a tree with a write
+// context equivalent to a node's write context is allowed to modify that node.
+// A tree whose write context does not match a node's is not allowed to modify
+// it, and must create a new, writable copy (i.e., it is a Clone).
+//
+// When doing any write operation, we maintain the invariant that the current
+// node's context is equal to the context of the tree that requested the write.
+// We do this by, before we descend into any node, creating a copy with the
+// correct context if the contexts don't match.
+//
+// Since the node we're currently visiting on any write has the requesting
+// tree's context, that node is modifiable in place.  Children of that node may
+// not share context, but before we descend into them, we'll make a mutable
+// copy.
+type copyOnWriteContext struct {
+	freelist *FreeList
+}
+
+// Clone clones the btree, lazily.  Clone should not be called concurrently,
+// but the original tree (t) and the new tree (t2) can be used concurrently
+// once the Clone call completes.
+//
+// The internal tree structure of t is marked read-only and shared between t and
+// t2.  Writes to both t and t2 use copy-on-write logic, creating new nodes
+// whenever one of t's original nodes would have been modified.  Read operations
+// should have no performance degradation.  Write operations for both t and t2
+// will initially experience minor slow-downs caused by additional allocs and
+// copies due to the aforementioned copy-on-write logic, but should converge to
+// the original performance characteristics of the original tree.
+func (t *BTree) Clone() (t2 *BTree) {
+	// Create two entirely new copy-on-write contexts.
+	// This operation effectively creates three trees:
+	//   the original, shared nodes (old t.cow)
+	//   the new t.cow nodes
+	//   the new out.cow nodes
+	cow1, cow2 := *t.cow, *t.cow
+	out := *t
+	t.cow = &cow1
+	out.cow = &cow2
+	return &out
+}
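+
+// exampleClone is an editorial sketch, not part of the upstream library:
+// after Clone, writes to either tree no longer affect the other.
+func exampleClone() (Item, Item) {
+	t := New(2)
+	t.ReplaceOrInsert(exampleInt(1))
+	t2 := t.Clone()
+	t2.ReplaceOrInsert(exampleInt(2))
+	// The clone sees both items; the original still holds only the first.
+	return t.DeleteMax(), t2.DeleteMax() // expected exampleInt(1), exampleInt(2)
+}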
+
+// maxItems returns the max number of items to allow per node.
+func (t *BTree) maxItems() int {
+	return t.degree*2 - 1
+}
+
+// minItems returns the min number of items to allow per node (ignored for the
+// root node).
+func (t *BTree) minItems() int {
+	return t.degree - 1
+}
+
+func (c *copyOnWriteContext) newNode() (n *node) {
+	n = c.freelist.newNode()
+	n.cow = c
+	return
+}
+
+type freeType int
+
+const (
+	ftFreelistFull freeType = iota // node was freed (available for GC, not stored in freelist)
+	ftStored                       // node was stored in the freelist for later use
+	ftNotOwned                     // node was ignored by COW, since it's owned by another one
+)
+
+// freeNode frees a node within a given COW context, if it's owned by that
+// context.  It returns what happened to the node (see freeType const
+// documentation).
+func (c *copyOnWriteContext) freeNode(n *node) freeType {
+	if n.cow == c {
+		// clear to allow GC
+		n.items.truncate(0)
+		n.children.truncate(0)
+		n.cow = nil
+		if c.freelist.freeNode(n) {
+			return ftStored
+		} else {
+			return ftFreelistFull
+		}
+	} else {
+		return ftNotOwned
+	}
+}
+
+// ReplaceOrInsert adds the given item to the tree.  If an item in the tree
+// already equals the given one, it is removed from the tree and returned.
+// Otherwise, nil is returned.
+//
+// nil cannot be added to the tree (will panic).
+func (t *BTree) ReplaceOrInsert(item Item) Item {
+	if item == nil {
+		panic("nil item being added to BTree")
+	}
+	if t.root == nil {
+		t.root = t.cow.newNode()
+		t.root.items = append(t.root.items, item)
+		t.length++
+		return nil
+	} else {
+		t.root = t.root.mutableFor(t.cow)
+		if len(t.root.items) >= t.maxItems() {
+			item2, second := t.root.split(t.maxItems() / 2)
+			oldroot := t.root
+			t.root = t.cow.newNode()
+			t.root.items = append(t.root.items, item2)
+			t.root.children = append(t.root.children, oldroot, second)
+		}
+	}
+	out := t.root.insert(item, t.maxItems())
+	if out == nil {
+		t.length++
+	}
+	return out
+}
+
+// Delete removes an item equal to the passed in item from the tree, returning
+// it.  If no such item exists, returns nil.
+func (t *BTree) Delete(item Item) Item {
+	return t.deleteItem(item, removeItem)
+}
+
+// DeleteMin removes the smallest item in the tree and returns it.
+// If no such item exists, returns nil.
+func (t *BTree) DeleteMin() Item {
+	return t.deleteItem(nil, removeMin)
+}
+
+// DeleteMax removes the largest item in the tree and returns it.
+// If no such item exists, returns nil.
+func (t *BTree) DeleteMax() Item {
+	return t.deleteItem(nil, removeMax)
+}
+
+func (t *BTree) deleteItem(item Item, typ toRemove) Item {
+	if t.root == nil || len(t.root.items) == 0 {
+		return nil
+	}
+	t.root = t.root.mutableFor(t.cow)
+	out := t.root.remove(item, t.minItems(), typ)
+	if len(t.root.items) == 0 && len(t.root.children) > 0 {
+		oldroot := t.root
+		t.root = t.root.children[0]
+		t.cow.freeNode(oldroot)
+	}
+	if out != nil {
+		t.length--
+	}
+	return out
+}
+
+// AscendRange calls the iterator for every value in the tree within the range
+// [greaterOrEqual, lessThan), until iterator returns false.
+func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) {
+	if t.root == nil {
+		return
+	}
+	t.root.iterate(ascend, greaterOrEqual, lessThan, true, false, iterator)
+}
+
+// AscendLessThan calls the iterator for every value in the tree within the range
+// [first, pivot), until iterator returns false.
+func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) {
+	if t.root == nil {
+		return
+	}
+	t.root.iterate(ascend, nil, pivot, false, false, iterator)
+}
+
+// AscendGreaterOrEqual calls the iterator for every value in the tree within
+// the range [pivot, last], until iterator returns false.
+func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) {
+	if t.root == nil {
+		return
+	}
+	t.root.iterate(ascend, pivot, nil, true, false, iterator)
+}
+
+// Ascend calls the iterator for every value in the tree within the range
+// [first, last], until iterator returns false.
+func (t *BTree) Ascend(iterator ItemIterator) {
+	if t.root == nil {
+		return
+	}
+	t.root.iterate(ascend, nil, nil, false, false, iterator)
+}
+
+// DescendRange calls the iterator for every value in the tree within the range
+// [lessOrEqual, greaterThan), until iterator returns false.
+func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) {
+	if t.root == nil {
+		return
+	}
+	t.root.iterate(descend, lessOrEqual, greaterThan, true, false, iterator)
+}
+
+// DescendLessOrEqual calls the iterator for every value in the tree within the range
+// [pivot, first], until iterator returns false.
+func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) {
+	if t.root == nil {
+		return
+	}
+	t.root.iterate(descend, pivot, nil, true, false, iterator)
+}
+
+// DescendGreaterThan calls the iterator for every value in the tree within
+// the range (pivot, last], until iterator returns false.
+func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) {
+	if t.root == nil {
+		return
+	}
+	t.root.iterate(descend, nil, pivot, false, false, iterator)
+}
+
+// Descend calls the iterator for every value in the tree within the range
+// [last, first], until iterator returns false.
+func (t *BTree) Descend(iterator ItemIterator) {
+	if t.root == nil {
+		return
+	}
+	t.root.iterate(descend, nil, nil, false, false, iterator)
+}
+
+// Get looks for the key item in the tree, returning it.  It returns nil if
+// unable to find that item.
+func (t *BTree) Get(key Item) Item {
+	if t.root == nil {
+		return nil
+	}
+	return t.root.get(key)
+}
+
+// Min returns the smallest item in the tree, or nil if the tree is empty.
+func (t *BTree) Min() Item {
+	return min(t.root)
+}
+
+// Max returns the largest item in the tree, or nil if the tree is empty.
+func (t *BTree) Max() Item {
+	return max(t.root)
+}
+
+// Has returns true if the given key is in the tree.
+func (t *BTree) Has(key Item) bool {
+	return t.Get(key) != nil
+}
+
+// Len returns the number of items currently in the tree.
+func (t *BTree) Len() int {
+	return t.length
+}
+
+// Clear removes all items from the btree.  If addNodesToFreelist is true,
+// t's nodes are added to its freelist as part of this call, until the freelist
+// is full.  Otherwise, the root node is simply dereferenced and the subtree
+// left to Go's normal GC processes.
+//
+// This can be much faster than calling Delete on all elements, because that
+// requires finding/removing each element in the tree and updating the tree
+// accordingly.  It also is somewhat faster than creating a new tree to replace
+// the old one, because nodes from the old tree are reclaimed into the freelist
+// for use by the new one, instead of being lost to the garbage collector.
+//
+// This call takes:
+//   O(1): when addNodesToFreelist is false, this is a single operation.
+//   O(1): when the freelist is already full, it breaks out immediately.
+//   O(freelist size): when the freelist is empty and the nodes are all owned
+//       by this tree, nodes are added to the freelist until full.
+//   O(tree size): when all nodes are owned by another tree, all nodes are
+//       iterated over looking for nodes to add to the freelist, and due to
+//       ownership, none are.
+func (t *BTree) Clear(addNodesToFreelist bool) {
+	if t.root != nil && addNodesToFreelist {
+		t.root.reset(t.cow)
+	}
+	t.root, t.length = nil, 0
+}
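+
+// Illustrative usage (a sketch, not from the upstream docs; NewFreeList and
+// NewWithFreeList are assumed package constructors not shown in this hunk):
+//
+//	fl := btree.NewFreeList(1024)
+//	t := btree.NewWithFreeList(32, fl)
+//	// ... use t ...
+//	t.Clear(true) // nodes are returned to fl for reuse by other trees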
+
+// reset returns a subtree to the freelist.  It breaks out immediately if the
+// freelist is full, since the only benefit of iterating is to fill that
+// freelist up.  Returns true if parent reset call should continue.
+func (n *node) reset(c *copyOnWriteContext) bool {
+	for _, child := range n.children {
+		if !child.reset(c) {
+			return false
+		}
+	}
+	return c.freeNode(n) != ftFreelistFull
+}
+
+// Int implements the Item interface for integers.
+type Int int
+
+// Less returns true if int(a) < int(b).
+func (a Int) Less(b Item) bool {
+	return a < b.(Int)
+}
diff --git a/vendor/github.com/google/gofuzz/README.md b/vendor/github.com/google/gofuzz/README.md
index 64869af3..386c2a45 100644
--- a/vendor/github.com/google/gofuzz/README.md
+++ b/vendor/github.com/google/gofuzz/README.md
@@ -3,7 +3,7 @@ gofuzz
 
 gofuzz is a library for populating go objects with random values.
 
-[![GoDoc](https://godoc.org/github.com/google/gofuzz?status.png)](https://godoc.org/github.com/google/gofuzz)
+[![GoDoc](https://godoc.org/github.com/google/gofuzz?status.svg)](https://godoc.org/github.com/google/gofuzz)
 [![Travis](https://travis-ci.org/google/gofuzz.svg?branch=master)](https://travis-ci.org/google/gofuzz)
 
 This is useful for testing:
diff --git a/vendor/github.com/google/gofuzz/fuzz.go b/vendor/github.com/google/gofuzz/fuzz.go
index 1dfa80a6..da0a5f93 100644
--- a/vendor/github.com/google/gofuzz/fuzz.go
+++ b/vendor/github.com/google/gofuzz/fuzz.go
@@ -20,6 +20,7 @@ import (
 	"fmt"
 	"math/rand"
 	"reflect"
+	"regexp"
 	"time"
 )
 
@@ -28,13 +29,14 @@ type fuzzFuncMap map[reflect.Type]reflect.Value
 
 // Fuzzer knows how to fill any object with random fields.
 type Fuzzer struct {
-	fuzzFuncs        fuzzFuncMap
-	defaultFuzzFuncs fuzzFuncMap
-	r                *rand.Rand
-	nilChance        float64
-	minElements      int
-	maxElements      int
-	maxDepth         int
+	fuzzFuncs         fuzzFuncMap
+	defaultFuzzFuncs  fuzzFuncMap
+	r                 *rand.Rand
+	nilChance         float64
+	minElements       int
+	maxElements       int
+	maxDepth          int
+	skipFieldPatterns []*regexp.Regexp
 }
 
 // New returns a new Fuzzer. Customize your Fuzzer further by calling Funcs,
@@ -150,6 +152,13 @@ func (f *Fuzzer) MaxDepth(d int) *Fuzzer {
 	return f
 }
 
+// SkipFieldsWithPattern skips struct fields whose names match the supplied
+// pattern. Call this multiple times if needed. This is useful for skipping the
+// XXX_ fields generated by protobuf.
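+//
+// Illustrative usage (a sketch, not from the upstream docs):
+//
+//	f := fuzz.New().SkipFieldsWithPattern(regexp.MustCompile(`^XXX_`))
+//	var msg someProtoMessage // hypothetical struct with XXX_ fields
+//	f.Fuzz(&msg)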
+func (f *Fuzzer) SkipFieldsWithPattern(pattern *regexp.Regexp) *Fuzzer {
+	f.skipFieldPatterns = append(f.skipFieldPatterns, pattern)
+	return f
+}
+
 // Fuzz recursively fills all of obj's fields with something random.  First
 // this tries to find a custom fuzz function (see Funcs).  If there is no
 // custom function this tests whether the object implements fuzz.Interface and,
@@ -274,7 +283,17 @@ func (fc *fuzzerContext) doFuzz(v reflect.Value, flags uint64) {
 		v.Set(reflect.Zero(v.Type()))
 	case reflect.Struct:
 		for i := 0; i < v.NumField(); i++ {
-			fc.doFuzz(v.Field(i), 0)
+			skipField := false
+			fieldName := v.Type().Field(i).Name
+			for _, pattern := range fc.fuzzer.skipFieldPatterns {
+				if pattern.MatchString(fieldName) {
+					skipField = true
+					break
+				}
+			}
+			if !skipField {
+				fc.doFuzz(v.Field(i), 0)
+			}
 		}
 	case reflect.Chan:
 		fallthrough
diff --git a/vendor/github.com/gregjones/httpcache/.travis.yml b/vendor/github.com/gregjones/httpcache/.travis.yml
new file mode 100644
index 00000000..597bc999
--- /dev/null
+++ b/vendor/github.com/gregjones/httpcache/.travis.yml
@@ -0,0 +1,18 @@
+sudo: false
+language: go
+matrix:
+  allow_failures:
+    - go: master
+  fast_finish: true
+  include:
+    - go: 1.10.x
+    - go: 1.11.x
+      env: GOFMT=1
+    - go: master
+install:
+  - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
+script:
+  - go get -t -v ./...
+  - if test -n "${GOFMT}"; then gofmt -w -s . && git diff --exit-code; fi
+  - go tool vet .
+  - go test -v -race ./...
diff --git a/vendor/github.com/gregjones/httpcache/LICENSE.txt b/vendor/github.com/gregjones/httpcache/LICENSE.txt
new file mode 100644
index 00000000..81316beb
--- /dev/null
+++ b/vendor/github.com/gregjones/httpcache/LICENSE.txt
@@ -0,0 +1,7 @@
+Copyright © 2012 Greg Jones (greg.jones@gmail.com)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/gregjones/httpcache/README.md b/vendor/github.com/gregjones/httpcache/README.md
new file mode 100644
index 00000000..09c9e7c1
--- /dev/null
+++ b/vendor/github.com/gregjones/httpcache/README.md
@@ -0,0 +1,25 @@
+httpcache
+=========
+
+[![Build Status](https://travis-ci.org/gregjones/httpcache.svg?branch=master)](https://travis-ci.org/gregjones/httpcache) [![GoDoc](https://godoc.org/github.com/gregjones/httpcache?status.svg)](https://godoc.org/github.com/gregjones/httpcache)
+
+Package httpcache provides a http.RoundTripper implementation that works as a mostly [RFC 7234](https://tools.ietf.org/html/rfc7234) compliant cache for http responses.
+
+It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client and not for a shared proxy).
+
+Cache Backends
+--------------
+
+- The built-in 'memory' cache stores responses in an in-memory map.
+- [`github.com/gregjones/httpcache/diskcache`](https://github.com/gregjones/httpcache/tree/master/diskcache) provides a filesystem-backed cache using the [diskv](https://github.com/peterbourgon/diskv) library.
+- [`github.com/gregjones/httpcache/memcache`](https://github.com/gregjones/httpcache/tree/master/memcache) provides memcache implementations, for both App Engine and 'normal' memcache servers.
+- [`sourcegraph.com/sourcegraph/s3cache`](https://sourcegraph.com/github.com/sourcegraph/s3cache) uses Amazon S3 for storage.
+- [`github.com/gregjones/httpcache/leveldbcache`](https://github.com/gregjones/httpcache/tree/master/leveldbcache) provides a filesystem-backed cache using [leveldb](https://github.com/syndtr/goleveldb/leveldb).
+- [`github.com/die-net/lrucache`](https://github.com/die-net/lrucache) provides an in-memory cache that will evict least-recently used entries.
+- [`github.com/die-net/lrucache/twotier`](https://github.com/die-net/lrucache/tree/master/twotier) allows caches to be combined, for example to use lrucache above with a persistent disk-cache.
+- [`github.com/birkelund/boltdbcache`](https://github.com/birkelund/boltdbcache) provides a BoltDB implementation (based on the [bbolt](https://github.com/coreos/bbolt) fork).
+
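+Usage
+-----
+
+A minimal sketch using only the names exported by this package (error handling omitted):
+
+    tp := httpcache.NewMemoryCacheTransport()
+    client := tp.Client()
+    resp, _ := client.Get("https://example.com/") // served from cache while fresh
+    defer resp.Body.Close()
+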
+License
+-------
+
+-	[MIT License](LICENSE.txt)
diff --git a/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go b/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go
new file mode 100644
index 00000000..42e3129d
--- /dev/null
+++ b/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go
@@ -0,0 +1,61 @@
+// Package diskcache provides an implementation of httpcache.Cache that uses the diskv package
+// to supplement an in-memory map with persistent storage.
+//
+package diskcache
+
+import (
+	"bytes"
+	"crypto/md5"
+	"encoding/hex"
+	"github.com/peterbourgon/diskv"
+	"io"
+)
+
+// Cache is an implementation of httpcache.Cache that supplements the in-memory map with persistent storage
+type Cache struct {
+	d *diskv.Diskv
+}
+
+// Get returns the response corresponding to key if present
+func (c *Cache) Get(key string) (resp []byte, ok bool) {
+	key = keyToFilename(key)
+	resp, err := c.d.Read(key)
+	if err != nil {
+		return []byte{}, false
+	}
+	return resp, true
+}
+
+// Set saves a response to the cache as key
+func (c *Cache) Set(key string, resp []byte) {
+	key = keyToFilename(key)
+	c.d.WriteStream(key, bytes.NewReader(resp), true)
+}
+
+// Delete removes the response with key from the cache
+func (c *Cache) Delete(key string) {
+	key = keyToFilename(key)
+	c.d.Erase(key)
+}
+
+func keyToFilename(key string) string {
+	h := md5.New()
+	io.WriteString(h, key)
+	return hex.EncodeToString(h.Sum(nil))
+}
+
+// New returns a new Cache that will store files in basePath
+func New(basePath string) *Cache {
+	return &Cache{
+		d: diskv.New(diskv.Options{
+			BasePath:     basePath,
+			CacheSizeMax: 100 * 1024 * 1024, // 100MB
+		}),
+	}
+}
+
+// NewWithDiskv returns a new Cache using the provided Diskv as underlying
+// storage.
+func NewWithDiskv(d *diskv.Diskv) *Cache {
+	return &Cache{d}
+}
diff --git a/vendor/github.com/gregjones/httpcache/httpcache.go b/vendor/github.com/gregjones/httpcache/httpcache.go
new file mode 100644
index 00000000..b41a63d1
--- /dev/null
+++ b/vendor/github.com/gregjones/httpcache/httpcache.go
@@ -0,0 +1,551 @@
+// Package httpcache provides a http.RoundTripper implementation that works as a
+// mostly RFC-compliant cache for http responses.
+//
+// It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client
+// and not for a shared proxy).
+//
+package httpcache
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/http/httputil"
+	"strings"
+	"sync"
+	"time"
+)
+
+const (
+	stale = iota
+	fresh
+	transparent
+	// XFromCache is the header added to responses that are returned from the cache
+	XFromCache = "X-From-Cache"
+)
+
+// A Cache interface is used by the Transport to store and retrieve responses.
+type Cache interface {
+	// Get returns the []byte representation of a cached response and a bool
+	// set to true if the value isn't empty
+	Get(key string) (responseBytes []byte, ok bool)
+	// Set stores the []byte representation of a response against a key
+	Set(key string, responseBytes []byte)
+	// Delete removes the value associated with the key
+	Delete(key string)
+}
+
+// cacheKey returns the cache key for req.
+func cacheKey(req *http.Request) string {
+	if req.Method == http.MethodGet {
+		return req.URL.String()
+	} else {
+		return req.Method + " " + req.URL.String()
+	}
+}
+
+// CachedResponse returns the cached http.Response for req if present, and nil
+// otherwise.
+func CachedResponse(c Cache, req *http.Request) (resp *http.Response, err error) {
+	cachedVal, ok := c.Get(cacheKey(req))
+	if !ok {
+		return
+	}
+
+	b := bytes.NewBuffer(cachedVal)
+	return http.ReadResponse(bufio.NewReader(b), req)
+}
+
+// MemoryCache is an implementation of Cache that stores responses in an in-memory map.
+type MemoryCache struct {
+	mu    sync.RWMutex
+	items map[string][]byte
+}
+
+// Get returns the []byte representation of the response and true if present, false if not
+func (c *MemoryCache) Get(key string) (resp []byte, ok bool) {
+	c.mu.RLock()
+	resp, ok = c.items[key]
+	c.mu.RUnlock()
+	return resp, ok
+}
+
+// Set saves response resp to the cache with key
+func (c *MemoryCache) Set(key string, resp []byte) {
+	c.mu.Lock()
+	c.items[key] = resp
+	c.mu.Unlock()
+}
+
+// Delete removes key from the cache
+func (c *MemoryCache) Delete(key string) {
+	c.mu.Lock()
+	delete(c.items, key)
+	c.mu.Unlock()
+}
+
+// NewMemoryCache returns a new Cache that will store items in an in-memory map
+func NewMemoryCache() *MemoryCache {
+	c := &MemoryCache{items: map[string][]byte{}}
+	return c
+}
+
+// Transport is an implementation of http.RoundTripper that will return values from a cache
+// where possible (avoiding a network request) and will additionally add validators (etag/if-modified-since)
+// to repeated requests, allowing servers to return 304 / Not Modified.
+type Transport struct {
+	// The RoundTripper interface actually used to make requests
+	// If nil, http.DefaultTransport is used
+	Transport http.RoundTripper
+	Cache     Cache
+	// If true, responses returned from the cache will be given an extra header, X-From-Cache
+	MarkCachedResponses bool
+}
+
+// NewTransport returns a new Transport with the
+// provided Cache implementation and MarkCachedResponses set to true
+func NewTransport(c Cache) *Transport {
+	return &Transport{Cache: c, MarkCachedResponses: true}
+}
+
+// Client returns an *http.Client that caches responses.
+func (t *Transport) Client() *http.Client {
+	return &http.Client{Transport: t}
+}
+
+// varyMatches will return false unless all of the cached values for the headers listed in Vary
+// match the new request
+func varyMatches(cachedResp *http.Response, req *http.Request) bool {
+	for _, header := range headerAllCommaSepValues(cachedResp.Header, "vary") {
+		header = http.CanonicalHeaderKey(header)
+		if header != "" && req.Header.Get(header) != cachedResp.Header.Get("X-Varied-"+header) {
+			return false
+		}
+	}
+	return true
+}
+
+// RoundTrip takes a Request and returns a Response
+//
+// If there is a fresh Response already in cache, then it will be returned without connecting to
+// the server.
+//
+// If there is a stale Response, then any validators it contains will be set on the new request
+// to give the server a chance to respond with NotModified. If this happens, then the cached Response
+// will be returned.
+func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
+	cacheKey := cacheKey(req)
+	cacheable := (req.Method == "GET" || req.Method == "HEAD") && req.Header.Get("range") == ""
+	var cachedResp *http.Response
+	if cacheable {
+		cachedResp, err = CachedResponse(t.Cache, req)
+	} else {
+		// Need to invalidate an existing value
+		t.Cache.Delete(cacheKey)
+	}
+
+	transport := t.Transport
+	if transport == nil {
+		transport = http.DefaultTransport
+	}
+
+	if cacheable && cachedResp != nil && err == nil {
+		if t.MarkCachedResponses {
+			cachedResp.Header.Set(XFromCache, "1")
+		}
+
+		if varyMatches(cachedResp, req) {
+			// Can only use cached value if the new request doesn't Vary significantly
+			freshness := getFreshness(cachedResp.Header, req.Header)
+			if freshness == fresh {
+				return cachedResp, nil
+			}
+
+			if freshness == stale {
+				var req2 *http.Request
+				// Add validators if caller hasn't already done so
+				etag := cachedResp.Header.Get("etag")
+				if etag != "" && req.Header.Get("etag") == "" {
+					req2 = cloneRequest(req)
+					req2.Header.Set("if-none-match", etag)
+				}
+				lastModified := cachedResp.Header.Get("last-modified")
+				if lastModified != "" && req.Header.Get("last-modified") == "" {
+					if req2 == nil {
+						req2 = cloneRequest(req)
+					}
+					req2.Header.Set("if-modified-since", lastModified)
+				}
+				if req2 != nil {
+					req = req2
+				}
+			}
+		}
+
+		resp, err = transport.RoundTrip(req)
+		if err == nil && req.Method == "GET" && resp.StatusCode == http.StatusNotModified {
+			// Replace the 304 response with the one from cache, but update with some new headers
+			endToEndHeaders := getEndToEndHeaders(resp.Header)
+			for _, header := range endToEndHeaders {
+				cachedResp.Header[header] = resp.Header[header]
+			}
+			resp = cachedResp
+		} else if (err != nil || (cachedResp != nil && resp.StatusCode >= 500)) &&
+			req.Method == "GET" && canStaleOnError(cachedResp.Header, req.Header) {
+			// In case of transport failure (or a 5xx response) with stale-if-error
+			// activated, return the cached content when available.
+			return cachedResp, nil
+		} else {
+			if err != nil || resp.StatusCode != http.StatusOK {
+				t.Cache.Delete(cacheKey)
+			}
+			if err != nil {
+				return nil, err
+			}
+		}
+	} else {
+		reqCacheControl := parseCacheControl(req.Header)
+		if _, ok := reqCacheControl["only-if-cached"]; ok {
+			resp = newGatewayTimeoutResponse(req)
+		} else {
+			resp, err = transport.RoundTrip(req)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+
+	if cacheable && canStore(parseCacheControl(req.Header), parseCacheControl(resp.Header)) {
+		for _, varyKey := range headerAllCommaSepValues(resp.Header, "vary") {
+			varyKey = http.CanonicalHeaderKey(varyKey)
+			fakeHeader := "X-Varied-" + varyKey
+			reqValue := req.Header.Get(varyKey)
+			if reqValue != "" {
+				resp.Header.Set(fakeHeader, reqValue)
+			}
+		}
+		switch req.Method {
+		case "GET":
+			// Delay caching until EOF is reached.
+			resp.Body = &cachingReadCloser{
+				R: resp.Body,
+				OnEOF: func(r io.Reader) {
+					resp := *resp
+					resp.Body = ioutil.NopCloser(r)
+					respBytes, err := httputil.DumpResponse(&resp, true)
+					if err == nil {
+						t.Cache.Set(cacheKey, respBytes)
+					}
+				},
+			}
+		default:
+			respBytes, err := httputil.DumpResponse(resp, true)
+			if err == nil {
+				t.Cache.Set(cacheKey, respBytes)
+			}
+		}
+	} else {
+		t.Cache.Delete(cacheKey)
+	}
+	return resp, nil
+}
+
+// ErrNoDateHeader indicates that the HTTP headers contained no Date header.
+var ErrNoDateHeader = errors.New("no Date header")
+
+// Date parses and returns the value of the Date header.
+func Date(respHeaders http.Header) (date time.Time, err error) {
+	dateHeader := respHeaders.Get("date")
+	if dateHeader == "" {
+		err = ErrNoDateHeader
+		return
+	}
+
+	return time.Parse(time.RFC1123, dateHeader)
+}
+
+type realClock struct{}
+
+func (c *realClock) since(d time.Time) time.Duration {
+	return time.Since(d)
+}
+
+type timer interface {
+	since(d time.Time) time.Duration
+}
+
+var clock timer = &realClock{}
+
+// getFreshness will return one of fresh/stale/transparent based on the cache-control
+// values of the request and the response
+//
+// fresh indicates the response can be returned
+// stale indicates that the response needs validating before it is returned
+// transparent indicates the response should not be used to fulfil the request
+//
+// Because this is only a private cache, 'public' and 'private' in cache-control aren't
+// significant. Similarly, s-maxage isn't used.
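+//
+// For example (illustrative): a response whose Date header is 30 seconds old
+// with Cache-Control "max-age=60" is fresh; the same response with "no-cache"
+// is stale and must be revalidated.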
+func getFreshness(respHeaders, reqHeaders http.Header) (freshness int) {
+	respCacheControl := parseCacheControl(respHeaders)
+	reqCacheControl := parseCacheControl(reqHeaders)
+	if _, ok := reqCacheControl["no-cache"]; ok {
+		return transparent
+	}
+	if _, ok := respCacheControl["no-cache"]; ok {
+		return stale
+	}
+	if _, ok := reqCacheControl["only-if-cached"]; ok {
+		return fresh
+	}
+
+	date, err := Date(respHeaders)
+	if err != nil {
+		return stale
+	}
+	currentAge := clock.since(date)
+
+	var lifetime time.Duration
+	var zeroDuration time.Duration
+
+	// If a response includes both an Expires header and a max-age directive,
+	// the max-age directive overrides the Expires header, even if the Expires header is more restrictive.
+	if maxAge, ok := respCacheControl["max-age"]; ok {
+		lifetime, err = time.ParseDuration(maxAge + "s")
+		if err != nil {
+			lifetime = zeroDuration
+		}
+	} else {
+		expiresHeader := respHeaders.Get("Expires")
+		if expiresHeader != "" {
+			expires, err := time.Parse(time.RFC1123, expiresHeader)
+			if err != nil {
+				lifetime = zeroDuration
+			} else {
+				lifetime = expires.Sub(date)
+			}
+		}
+	}
+
+	if maxAge, ok := reqCacheControl["max-age"]; ok {
+		// the client is willing to accept a response whose age is no greater than the specified time in seconds
+		lifetime, err = time.ParseDuration(maxAge + "s")
+		if err != nil {
+			lifetime = zeroDuration
+		}
+	}
+	if minfresh, ok := reqCacheControl["min-fresh"]; ok {
+		//  the client wants a response that will still be fresh for at least the specified number of seconds.
+		minfreshDuration, err := time.ParseDuration(minfresh + "s")
+		if err == nil {
+			currentAge = time.Duration(currentAge + minfreshDuration)
+		}
+	}
+
+	if maxstale, ok := reqCacheControl["max-stale"]; ok {
+		// Indicates that the client is willing to accept a response that has exceeded its expiration time.
+		// If max-stale is assigned a value, then the client is willing to accept a response that has exceeded
+		// its expiration time by no more than the specified number of seconds.
+		// If no value is assigned to max-stale, then the client is willing to accept a stale response of any age.
+		//
+		// Responses served only because of a max-stale value are supposed to have a Warning header added to them,
+		// but that seems like a hassle, and is it actually useful? If so, then there needs to be a different
+		// return-value available here.
+		if maxstale == "" {
+			return fresh
+		}
+		maxstaleDuration, err := time.ParseDuration(maxstale + "s")
+		if err == nil {
+			currentAge = time.Duration(currentAge - maxstaleDuration)
+		}
+	}
+
+	if lifetime > currentAge {
+		return fresh
+	}
+
+	return stale
+}
+
+// Returns true if either the request or the response includes the stale-if-error
+// cache control extension: https://tools.ietf.org/html/rfc5861
+func canStaleOnError(respHeaders, reqHeaders http.Header) bool {
+	respCacheControl := parseCacheControl(respHeaders)
+	reqCacheControl := parseCacheControl(reqHeaders)
+
+	var err error
+	lifetime := time.Duration(-1)
+
+	if staleMaxAge, ok := respCacheControl["stale-if-error"]; ok {
+		if staleMaxAge != "" {
+			lifetime, err = time.ParseDuration(staleMaxAge + "s")
+			if err != nil {
+				return false
+			}
+		} else {
+			return true
+		}
+	}
+	if staleMaxAge, ok := reqCacheControl["stale-if-error"]; ok {
+		if staleMaxAge != "" {
+			lifetime, err = time.ParseDuration(staleMaxAge + "s")
+			if err != nil {
+				return false
+			}
+		} else {
+			return true
+		}
+	}
+
+	if lifetime >= 0 {
+		date, err := Date(respHeaders)
+		if err != nil {
+			return false
+		}
+		currentAge := clock.since(date)
+		if lifetime > currentAge {
+			return true
+		}
+	}
+
+	return false
+}
+
+func getEndToEndHeaders(respHeaders http.Header) []string {
+	// These headers are always hop-by-hop
+	hopByHopHeaders := map[string]struct{}{
+		"Connection":          {},
+		"Keep-Alive":          {},
+		"Proxy-Authenticate":  {},
+		"Proxy-Authorization": {},
+		"Te":                  {},
+		"Trailers":            {},
+		"Transfer-Encoding":   {},
+		"Upgrade":             {},
+	}
+
+	for _, extra := range strings.Split(respHeaders.Get("connection"), ",") {
+		// any header listed in connection, if present, is also considered hop-by-hop
+		if strings.Trim(extra, " ") != "" {
+			hopByHopHeaders[http.CanonicalHeaderKey(extra)] = struct{}{}
+		}
+	}
+	endToEndHeaders := []string{}
+	for respHeader := range respHeaders {
+		if _, ok := hopByHopHeaders[respHeader]; !ok {
+			endToEndHeaders = append(endToEndHeaders, respHeader)
+		}
+	}
+	return endToEndHeaders
+}
+
+func canStore(reqCacheControl, respCacheControl cacheControl) (canStore bool) {
+	if _, ok := respCacheControl["no-store"]; ok {
+		return false
+	}
+	if _, ok := reqCacheControl["no-store"]; ok {
+		return false
+	}
+	return true
+}
+
+func newGatewayTimeoutResponse(req *http.Request) *http.Response {
+	var braw bytes.Buffer
+	braw.WriteString("HTTP/1.1 504 Gateway Timeout\r\n\r\n")
+	resp, err := http.ReadResponse(bufio.NewReader(&braw), req)
+	if err != nil {
+		panic(err)
+	}
+	return resp
+}
+
+// cloneRequest returns a clone of the provided *http.Request.
+// The clone is a shallow copy of the struct and its Header map.
+// (This function copyright goauth2 authors: https://code.google.com/p/goauth2)
+func cloneRequest(r *http.Request) *http.Request {
+	// shallow copy of the struct
+	r2 := new(http.Request)
+	*r2 = *r
+	// deep copy of the Header
+	r2.Header = make(http.Header)
+	for k, s := range r.Header {
+		r2.Header[k] = s
+	}
+	return r2
+}
+
+type cacheControl map[string]string
+
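+// parseCacheControl splits a Cache-Control header into a directive map; for
+// example, "max-age=300, no-store" yields {"max-age": "300", "no-store": ""}.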
+func parseCacheControl(headers http.Header) cacheControl {
+	cc := cacheControl{}
+	ccHeader := headers.Get("Cache-Control")
+	for _, part := range strings.Split(ccHeader, ",") {
+		part = strings.Trim(part, " ")
+		if part == "" {
+			continue
+		}
+		if strings.ContainsRune(part, '=') {
+			keyval := strings.Split(part, "=")
+			cc[strings.Trim(keyval[0], " ")] = strings.Trim(keyval[1], ",")
+		} else {
+			cc[part] = ""
+		}
+	}
+	return cc
+}
+
+// headerAllCommaSepValues returns all comma-separated values (each
+// with whitespace trimmed) for header name in headers. According to
+// Section 4.2 of the HTTP/1.1 spec
+// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2),
+// values from multiple occurrences of a header should be concatenated, if
+// the header's value is a comma-separated list.
+func headerAllCommaSepValues(headers http.Header, name string) []string {
+	var vals []string
+	for _, val := range headers[http.CanonicalHeaderKey(name)] {
+		fields := strings.Split(val, ",")
+		for i, f := range fields {
+			fields[i] = strings.TrimSpace(f)
+		}
+		vals = append(vals, fields...)
+	}
+	return vals
+}
+
+// cachingReadCloser is a wrapper around ReadCloser R that calls OnEOF
+// handler with a full copy of the content read from R when EOF is
+// reached.
+type cachingReadCloser struct {
+	// Underlying ReadCloser.
+	R io.ReadCloser
+	// OnEOF is called with a copy of the content of R when EOF is reached.
+	OnEOF func(io.Reader)
+
+	buf bytes.Buffer // buf stores a copy of the content of R.
+}
+
+// Read reads the next len(p) bytes from R or until R is drained. The
+// return value n is the number of bytes read. If R has no data to
+// return, err is io.EOF and OnEOF is called with a full copy of what
+// has been read so far.
+func (r *cachingReadCloser) Read(p []byte) (n int, err error) {
+	n, err = r.R.Read(p)
+	r.buf.Write(p[:n])
+	if err == io.EOF {
+		r.OnEOF(bytes.NewReader(r.buf.Bytes()))
+	}
+	return n, err
+}
+
+func (r *cachingReadCloser) Close() error {
+	return r.R.Close()
+}
+
+// NewMemoryCacheTransport returns a new Transport using the in-memory cache implementation
+func NewMemoryCacheTransport() *Transport {
+	c := NewMemoryCache()
+	t := NewTransport(c)
+	return t
+}
diff --git a/vendor/github.com/inconshreveable/mousetrap/LICENSE b/vendor/github.com/inconshreveable/mousetrap/LICENSE
new file mode 100644
index 00000000..5f0d1fb6
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/LICENSE
@@ -0,0 +1,13 @@
+Copyright 2014 Alan Shreve
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/github.com/inconshreveable/mousetrap/README.md b/vendor/github.com/inconshreveable/mousetrap/README.md
new file mode 100644
index 00000000..7a950d17
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/README.md
@@ -0,0 +1,23 @@
+# mousetrap
+
+mousetrap is a tiny library that answers a single question.
+
+On a Windows machine, was the process invoked by someone double-clicking on
+the executable file while browsing in explorer?
+
+### Motivation
+
+Windows developers unfamiliar with command line tools will often "double-click"
+the executable for a tool. Because most CLI tools print the help and then exit
+when invoked without arguments, this is often very frustrating for those users.
+
+mousetrap provides a way to detect these invocations so that you can provide
+more helpful behavior and instructions on how to run the CLI tool. To see what
+this looks like, both from an organizational and a technical perspective, see
+https://inconshreveable.com/09-09-2014/sweat-the-small-stuff/
+
+### The interface
+
+The library exposes a single interface:
+
+    func StartedByExplorer() (bool)
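+
+A minimal sketch of how a CLI might react (the message and delay are only an example):
+
+    if mousetrap.StartedByExplorer() {
+        fmt.Println("This is a command-line tool; please run it from a terminal.")
+        time.Sleep(5 * time.Second) // leave the console window open long enough to read
+        os.Exit(1)
+    }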
diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_others.go b/vendor/github.com/inconshreveable/mousetrap/trap_others.go
new file mode 100644
index 00000000..9d2d8a4b
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/trap_others.go
@@ -0,0 +1,15 @@
+// +build !windows
+
+package mousetrap
+
+// StartedByExplorer returns true if the program was invoked by the user
+// double-clicking on the executable from explorer.exe
+//
+// It is conservative and returns false if any of the internal calls fail.
+// It does not guarantee that the program was run from a terminal. It can only tell you
+// whether it was launched from explorer.exe
+//
+// On non-Windows platforms, it always returns false.
+func StartedByExplorer() bool {
+	return false
+}
diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go
new file mode 100644
index 00000000..336142a5
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go
@@ -0,0 +1,98 @@
+// +build windows
+// +build !go1.4
+
+package mousetrap
+
+import (
+	"fmt"
+	"os"
+	"syscall"
+	"unsafe"
+)
+
+const (
+	// defined by the Win32 API
+	th32cs_snapprocess uintptr = 0x2
+)
+
+var (
+	kernel                   = syscall.MustLoadDLL("kernel32.dll")
+	CreateToolhelp32Snapshot = kernel.MustFindProc("CreateToolhelp32Snapshot")
+	Process32First           = kernel.MustFindProc("Process32FirstW")
+	Process32Next            = kernel.MustFindProc("Process32NextW")
+)
+
+// ProcessEntry32 structure defined by the Win32 API
+type processEntry32 struct {
+	dwSize              uint32
+	cntUsage            uint32
+	th32ProcessID       uint32
+	th32DefaultHeapID   int
+	th32ModuleID        uint32
+	cntThreads          uint32
+	th32ParentProcessID uint32
+	pcPriClassBase      int32
+	dwFlags             uint32
+	szExeFile           [syscall.MAX_PATH]uint16
+}
+
+func getProcessEntry(pid int) (pe *processEntry32, err error) {
+	snapshot, _, e1 := CreateToolhelp32Snapshot.Call(th32cs_snapprocess, uintptr(0))
+	if snapshot == uintptr(syscall.InvalidHandle) {
+		err = fmt.Errorf("CreateToolhelp32Snapshot: %v", e1)
+		return
+	}
+	defer syscall.CloseHandle(syscall.Handle(snapshot))
+
+	var processEntry processEntry32
+	processEntry.dwSize = uint32(unsafe.Sizeof(processEntry))
+	ok, _, e1 := Process32First.Call(snapshot, uintptr(unsafe.Pointer(&processEntry)))
+	if ok == 0 {
+		err = fmt.Errorf("Process32First: %v", e1)
+		return
+	}
+
+	for {
+		if processEntry.th32ProcessID == uint32(pid) {
+			pe = &processEntry
+			return
+		}
+
+		ok, _, e1 = Process32Next.Call(snapshot, uintptr(unsafe.Pointer(&processEntry)))
+		if ok == 0 {
+			err = fmt.Errorf("Process32Next: %v", e1)
+			return
+		}
+	}
+}
+
+func getppid() (pid int, err error) {
+	pe, err := getProcessEntry(os.Getpid())
+	if err != nil {
+		return
+	}
+
+	pid = int(pe.th32ParentProcessID)
+	return
+}
+
+// StartedByExplorer returns true if the program was invoked by the user double-clicking
+// on the executable from explorer.exe
+//
+// It is conservative and returns false if any of the internal calls fail.
+// It does not guarantee that the program was run from a terminal. It can only tell you
+// whether it was launched from explorer.exe
+func StartedByExplorer() bool {
+	ppid, err := getppid()
+	if err != nil {
+		return false
+	}
+
+	pe, err := getProcessEntry(ppid)
+	if err != nil {
+		return false
+	}
+
+	name := syscall.UTF16ToString(pe.szExeFile[:])
+	return name == "explorer.exe"
+}
diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go
new file mode 100644
index 00000000..9a28e57c
--- /dev/null
+++ b/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go
@@ -0,0 +1,46 @@
+// +build windows
+// +build go1.4
+
+package mousetrap
+
+import (
+	"os"
+	"syscall"
+	"unsafe"
+)
+
+func getProcessEntry(pid int) (*syscall.ProcessEntry32, error) {
+	snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0)
+	if err != nil {
+		return nil, err
+	}
+	defer syscall.CloseHandle(snapshot)
+	var procEntry syscall.ProcessEntry32
+	procEntry.Size = uint32(unsafe.Sizeof(procEntry))
+	if err = syscall.Process32First(snapshot, &procEntry); err != nil {
+		return nil, err
+	}
+	for {
+		if procEntry.ProcessID == uint32(pid) {
+			return &procEntry, nil
+		}
+		err = syscall.Process32Next(snapshot, &procEntry)
+		if err != nil {
+			return nil, err
+		}
+	}
+}
+
+// StartedByExplorer returns true if the program was invoked by the user double-clicking
+// on the executable from explorer.exe
+//
+// It is conservative and returns false if any of the internal calls fail.
+// It does not guarantee that the program was run from a terminal. It can only tell you
+// whether it was launched from explorer.exe
+func StartedByExplorer() bool {
+	pe, err := getProcessEntry(os.Getppid())
+	if err != nil {
+		return false
+	}
+	return "explorer.exe" == syscall.UTF16ToString(pe.ExeFile[:])
+}
diff --git a/vendor/github.com/liggitt/tabwriter/.travis.yml b/vendor/github.com/liggitt/tabwriter/.travis.yml
new file mode 100644
index 00000000..2768dc07
--- /dev/null
+++ b/vendor/github.com/liggitt/tabwriter/.travis.yml
@@ -0,0 +1,11 @@
+language: go
+
+go:
+  - "1.8"
+  - "1.9"
+  - "1.10"
+  - "1.11"
+  - "1.12"
+  - master
+
+script: go test -v ./...
diff --git a/vendor/github.com/liggitt/tabwriter/LICENSE b/vendor/github.com/liggitt/tabwriter/LICENSE
new file mode 100644
index 00000000..6a66aea5
--- /dev/null
+++ b/vendor/github.com/liggitt/tabwriter/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/liggitt/tabwriter/README.md b/vendor/github.com/liggitt/tabwriter/README.md
new file mode 100644
index 00000000..e75d3567
--- /dev/null
+++ b/vendor/github.com/liggitt/tabwriter/README.md
@@ -0,0 +1,7 @@
+This repo is a drop-in replacement for the golang [text/tabwriter](https://golang.org/pkg/text/tabwriter/) package.
+
+It is based on that package at [cf2c2ea8](https://github.com/golang/go/tree/cf2c2ea89d09d486bb018b1817c5874388038c3a/src/text/tabwriter) and inherits its license.
+
+The following additional features are supported:
+* `RememberWidths` flag allows remembering maximum widths seen per column even after Flush() is called.
+* `RememberedWidths() []int` and `SetRememberedWidths([]int) *Writer` allow obtaining and transferring remembered column widths between writers.
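+
+A minimal sketch of the added flag, using the standard Init/Flush API from this package:
+
+    w := new(tabwriter.Writer).Init(os.Stdout, 0, 8, 2, ' ', tabwriter.RememberWidths)
+    fmt.Fprintln(w, "NAME\tSTATUS")
+    fmt.Fprintln(w, "pod-1\tRunning")
+    w.Flush()
+    // Columns written through w later stay at least as wide as those seen so far.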
diff --git a/vendor/github.com/liggitt/tabwriter/tabwriter.go b/vendor/github.com/liggitt/tabwriter/tabwriter.go
new file mode 100644
index 00000000..fd3431fb
--- /dev/null
+++ b/vendor/github.com/liggitt/tabwriter/tabwriter.go
@@ -0,0 +1,637 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package tabwriter implements a write filter (tabwriter.Writer) that
+// translates tabbed columns in input into properly aligned text.
+//
+// It is a drop-in replacement for the golang text/tabwriter package (https://golang.org/pkg/text/tabwriter),
+// based on that package at https://github.com/golang/go/tree/cf2c2ea89d09d486bb018b1817c5874388038c3a
+// with support for additional features.
+//
+// The package is using the Elastic Tabstops algorithm described at
+// http://nickgravgaard.com/elastictabstops/index.html.
+package tabwriter
+
+import (
+	"io"
+	"unicode/utf8"
+)
+
+// ----------------------------------------------------------------------------
+// Filter implementation
+
+// A cell represents a segment of text terminated by tabs or line breaks.
+// The text itself is stored in a separate buffer; cell only describes the
+// segment's size in bytes, its width in runes, and whether it's an htab
+// ('\t') terminated cell.
+//
+type cell struct {
+	size  int  // cell size in bytes
+	width int  // cell width in runes
+	htab  bool // true if the cell is terminated by an htab ('\t')
+}
+
+// A Writer is a filter that inserts padding around tab-delimited
+// columns in its input to align them in the output.
+//
+// The Writer treats incoming bytes as UTF-8-encoded text consisting
+// of cells terminated by horizontal ('\t') or vertical ('\v') tabs,
+// and newline ('\n') or formfeed ('\f') characters; both newline and
+// formfeed act as line breaks.
+//
+// Tab-terminated cells in contiguous lines constitute a column. The
+// Writer inserts padding as needed to make all cells in a column have
+// the same width, effectively aligning the columns. It assumes that
+// all characters have the same width, except for tabs for which a
+// tabwidth must be specified. Column cells must be tab-terminated, not
+// tab-separated: non-tab terminated trailing text at the end of a line
+// forms a cell but that cell is not part of an aligned column.
+// For instance, in this example (where | stands for a horizontal tab):
+//
+//	aaaa|bbb|d
+//	aa  |b  |dd
+//	a   |
+//	aa  |cccc|eee
+//
+// the b and c are in distinct columns (the b column is not contiguous
+// all the way). The d and e are not in a column at all (there's no
+// terminating tab, nor would the column be contiguous).
+//
+// The Writer assumes that all Unicode code points have the same width;
+// this may not be true in some fonts or if the string contains combining
+// characters.
+//
+// If DiscardEmptyColumns is set, empty columns that are terminated
+// entirely by vertical (or "soft") tabs are discarded. Columns
+// terminated by horizontal (or "hard") tabs are not affected by
+// this flag.
+//
+// If a Writer is configured to filter HTML, HTML tags and entities
+// are passed through. The widths of tags and entities are
+// assumed to be zero (tags) and one (entities) for formatting purposes.
+//
+// A segment of text may be escaped by bracketing it with Escape
+// characters. The tabwriter passes escaped text segments through
+// unchanged. In particular, it does not interpret any tabs or line
+// breaks within the segment. If the StripEscape flag is set, the
+// Escape characters are stripped from the output; otherwise they
+// are passed through as well. For the purpose of formatting, the
+// width of the escaped text is always computed excluding the Escape
+// characters.
+//
+// The formfeed character acts like a newline but it also terminates
+// all columns in the current line (effectively calling Flush). Tab-
+// terminated cells in the next line start new columns. Unless found
+// inside an HTML tag or inside an escaped text segment, formfeed
+// characters appear as newlines in the output.
+//
+// The Writer must buffer input internally, because proper spacing
+// of one line may depend on the cells in future lines. Clients must
+// call Flush when done calling Write.
+//
+type Writer struct {
+	// configuration
+	output   io.Writer
+	minwidth int
+	tabwidth int
+	padding  int
+	padbytes [8]byte
+	flags    uint
+
+	// current state
+	buf     []byte   // collected text excluding tabs or line breaks
+	pos     int      // buffer position up to which cell.width of incomplete cell has been computed
+	cell    cell     // current incomplete cell; cell.width is up to buf[pos] excluding ignored sections
+	endChar byte     // terminating char of escaped sequence (Escape for escapes, '>', ';' for HTML tags/entities, or 0)
+	lines   [][]cell // list of lines; each line is a list of cells
+	widths  []int    // list of column widths in runes - re-used during formatting
+
+	maxwidths []int // list of max column widths in runes
+}
+
+// addLine adds a new line.
+// flushed is a hint indicating whether the underlying writer was just flushed.
+// If so, the previous line is not likely to be a good indicator of the new line's cells.
+func (b *Writer) addLine(flushed bool) {
+	// Grow slice instead of appending,
+	// as that gives us an opportunity
+	// to re-use an existing []cell.
+	if n := len(b.lines) + 1; n <= cap(b.lines) {
+		b.lines = b.lines[:n]
+		b.lines[n-1] = b.lines[n-1][:0]
+	} else {
+		b.lines = append(b.lines, nil)
+	}
+
+	if !flushed {
+		// The previous line is probably a good indicator
+		// of how many cells the current line will have.
+		// If the current line's capacity is smaller than that,
+		// abandon it and make a new one.
+		if n := len(b.lines); n >= 2 {
+			if prev := len(b.lines[n-2]); prev > cap(b.lines[n-1]) {
+				b.lines[n-1] = make([]cell, 0, prev)
+			}
+		}
+	}
+}
+
+// Reset the current state.
+func (b *Writer) reset() {
+	b.buf = b.buf[:0]
+	b.pos = 0
+	b.cell = cell{}
+	b.endChar = 0
+	b.lines = b.lines[0:0]
+	b.widths = b.widths[0:0]
+	b.addLine(true)
+}
+
+// Internal representation (current state):
+//
+// - all text written is appended to buf; tabs and line breaks are stripped away
+// - at any given time there is a (possibly empty) incomplete cell at the end
+//   (the cell starts after a tab or line break)
+// - cell.size is the number of bytes belonging to the cell so far
+// - cell.width is text width in runes of that cell from the start of the cell to
+//   position pos; html tags and entities are excluded from this width if html
+//   filtering is enabled
+// - the sizes and widths of processed text are kept in the lines list
+//   which contains a list of cells for each line
+// - the widths list is a temporary list with current widths used during
+//   formatting; it is kept in Writer because it's re-used
+//
+//                    |<---------- size ---------->|
+//                    |                            |
+//                    |<- width ->|<- ignored ->|  |
+//                    |           |             |  |
+// [---processed---tab------------<tag>...</tag>...]
+// ^                  ^                         ^
+// |                  |                         |
+// buf                start of incomplete cell  pos
+
+// Formatting can be controlled with these flags.
+const (
+	// Ignore html tags and treat entities (starting with '&'
+	// and ending in ';') as single characters (width = 1).
+	FilterHTML uint = 1 << iota
+
+	// Strip Escape characters bracketing escaped text segments
+	// instead of passing them through unchanged with the text.
+	StripEscape
+
+	// Force right-alignment of cell content.
+	// Default is left-alignment.
+	AlignRight
+
+	// Handle empty columns as if they were not present in
+	// the input in the first place.
+	DiscardEmptyColumns
+
+	// Always use tabs for indentation columns (i.e., padding of
+	// leading empty cells on the left) independent of padchar.
+	TabIndent
+
+	// Print a vertical bar ('|') between columns (after formatting).
+	// Discarded columns appear as zero-width columns ("||").
+	Debug
+
+	// Remember maximum widths seen per column even after Flush() is called.
+	RememberWidths
+)
+
+// A Writer must be initialized with a call to Init. The first parameter (output)
+// specifies the filter output. The remaining parameters control the formatting:
+//
+//	minwidth	minimal cell width including any padding
+//	tabwidth	width of tab characters (equivalent number of spaces)
+//	padding		padding added to a cell before computing its width
+//	padchar		ASCII char used for padding
+//			if padchar == '\t', the Writer will assume that the
+//			width of a '\t' in the formatted output is tabwidth,
+//			and cells are left-aligned independent of align_left
+//			(for correct-looking results, tabwidth must correspond
+//			to the tab width in the viewer displaying the result)
+//	flags		formatting control
+//
+func (b *Writer) Init(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer {
+	if minwidth < 0 || tabwidth < 0 || padding < 0 {
+		panic("negative minwidth, tabwidth, or padding")
+	}
+	b.output = output
+	b.minwidth = minwidth
+	b.tabwidth = tabwidth
+	b.padding = padding
+	for i := range b.padbytes {
+		b.padbytes[i] = padchar
+	}
+	if padchar == '\t' {
+		// tab padding enforces left-alignment
+		flags &^= AlignRight
+	}
+	b.flags = flags
+
+	b.reset()
+
+	return b
+}
+
+// debugging support (keep code around)
+func (b *Writer) dump() {
+	pos := 0
+	for i, line := range b.lines {
+		print("(", i, ") ")
+		for _, c := range line {
+			print("[", string(b.buf[pos:pos+c.size]), "]")
+			pos += c.size
+		}
+		print("\n")
+	}
+	print("\n")
+}
+
+// local error wrapper so we can distinguish errors we want to return
+// as errors from genuine panics (which we don't want to return as errors)
+type osError struct {
+	err error
+}
+
+func (b *Writer) write0(buf []byte) {
+	n, err := b.output.Write(buf)
+	if n != len(buf) && err == nil {
+		err = io.ErrShortWrite
+	}
+	if err != nil {
+		panic(osError{err})
+	}
+}
+
+func (b *Writer) writeN(src []byte, n int) {
+	for n > len(src) {
+		b.write0(src)
+		n -= len(src)
+	}
+	b.write0(src[0:n])
+}
+
+var (
+	newline = []byte{'\n'}
+	tabs    = []byte("\t\t\t\t\t\t\t\t")
+)
+
+func (b *Writer) writePadding(textw, cellw int, useTabs bool) {
+	if b.padbytes[0] == '\t' || useTabs {
+		// padding is done with tabs
+		if b.tabwidth == 0 {
+			return // tabs have no width - can't do any padding
+		}
+		// make cellw the smallest multiple of b.tabwidth
+		cellw = (cellw + b.tabwidth - 1) / b.tabwidth * b.tabwidth
+		n := cellw - textw // amount of padding
+		if n < 0 {
+			panic("internal error")
+		}
+		b.writeN(tabs, (n+b.tabwidth-1)/b.tabwidth)
+		return
+	}
+
+	// padding is done with non-tab characters
+	b.writeN(b.padbytes[0:], cellw-textw)
+}
+
+var vbar = []byte{'|'}
+
+func (b *Writer) writeLines(pos0 int, line0, line1 int) (pos int) {
+	pos = pos0
+	for i := line0; i < line1; i++ {
+		line := b.lines[i]
+
+		// if TabIndent is set, use tabs to pad leading empty cells
+		useTabs := b.flags&TabIndent != 0
+
+		for j, c := range line {
+			if j > 0 && b.flags&Debug != 0 {
+				// indicate column break
+				b.write0(vbar)
+			}
+
+			if c.size == 0 {
+				// empty cell
+				if j < len(b.widths) {
+					b.writePadding(c.width, b.widths[j], useTabs)
+				}
+			} else {
+				// non-empty cell
+				useTabs = false
+				if b.flags&AlignRight == 0 { // align left
+					b.write0(b.buf[pos : pos+c.size])
+					pos += c.size
+					if j < len(b.widths) {
+						b.writePadding(c.width, b.widths[j], false)
+					}
+				} else { // align right
+					if j < len(b.widths) {
+						b.writePadding(c.width, b.widths[j], false)
+					}
+					b.write0(b.buf[pos : pos+c.size])
+					pos += c.size
+				}
+			}
+		}
+
+		if i+1 == len(b.lines) {
+			// last buffered line - we don't have a newline, so just write
+			// any outstanding buffered data
+			b.write0(b.buf[pos : pos+b.cell.size])
+			pos += b.cell.size
+		} else {
+			// not the last line - write newline
+			b.write0(newline)
+		}
+	}
+	return
+}
+
+// Format the text between line0 and line1 (excluding line1); pos
+// is the buffer position corresponding to the beginning of line0.
+// Returns the buffer position corresponding to the beginning of line1.
+//
+func (b *Writer) format(pos0 int, line0, line1 int) (pos int) {
+	pos = pos0
+	column := len(b.widths)
+	for this := line0; this < line1; this++ {
+		line := b.lines[this]
+
+		if column >= len(line)-1 {
+			continue
+		}
+		// cell exists in this column => this line
+		// has more cells than the previous line
+		// (the last cell per line is ignored because cells are
+		// tab-terminated; the last cell per line describes the
+		// text before the newline/formfeed and does not belong
+		// to a column)
+
+		// print unprinted lines until beginning of block
+		pos = b.writeLines(pos, line0, this)
+		line0 = this
+
+		// column block begin
+		width := b.minwidth // minimal column width
+		discardable := true // true if all cells in this column are empty and "soft"
+		for ; this < line1; this++ {
+			line = b.lines[this]
+			if column >= len(line)-1 {
+				break
+			}
+			// cell exists in this column
+			c := line[column]
+			// update width
+			if w := c.width + b.padding; w > width {
+				width = w
+			}
+			// update discardable
+			if c.width > 0 || c.htab {
+				discardable = false
+			}
+		}
+		// column block end
+
+		// discard empty columns if necessary
+		if discardable && b.flags&DiscardEmptyColumns != 0 {
+			width = 0
+		}
+
+		if b.flags&RememberWidths != 0 {
+			if len(b.maxwidths) < len(b.widths) {
+				b.maxwidths = append(b.maxwidths, b.widths[len(b.maxwidths):]...)
+			}
+
+			switch {
+			case len(b.maxwidths) == len(b.widths):
+				b.maxwidths = append(b.maxwidths, width)
+			case b.maxwidths[len(b.widths)] > width:
+				width = b.maxwidths[len(b.widths)]
+			case b.maxwidths[len(b.widths)] < width:
+				b.maxwidths[len(b.widths)] = width
+			}
+		}
+
+		// format and print all columns to the right of this column
+		// (we know the widths of this column and all columns to the left)
+		b.widths = append(b.widths, width) // push width
+		pos = b.format(pos, line0, this)
+		b.widths = b.widths[0 : len(b.widths)-1] // pop width
+		line0 = this
+	}
+
+	// print unprinted lines until end
+	return b.writeLines(pos, line0, line1)
+}
+
+// Append text to current cell.
+func (b *Writer) append(text []byte) {
+	b.buf = append(b.buf, text...)
+	b.cell.size += len(text)
+}
+
+// Update the cell width.
+func (b *Writer) updateWidth() {
+	b.cell.width += utf8.RuneCount(b.buf[b.pos:])
+	b.pos = len(b.buf)
+}
+
+// To escape a text segment, bracket it with Escape characters.
+// For instance, the tab in this string "Ignore this tab: \xff\t\xff"
+// does not terminate a cell and constitutes a single character of
+// width one for formatting purposes.
+//
+// The value 0xff was chosen because it cannot appear in a valid UTF-8 sequence.
+//
+const Escape = '\xff'
+
+// Start escaped mode.
+func (b *Writer) startEscape(ch byte) {
+	switch ch {
+	case Escape:
+		b.endChar = Escape
+	case '<':
+		b.endChar = '>'
+	case '&':
+		b.endChar = ';'
+	}
+}
+
+// Terminate escaped mode. If the escaped text was an HTML tag, its width
+// is assumed to be zero for formatting purposes; if it was an HTML entity,
+// its width is assumed to be one. In all other cases, the width is the
+// unicode width of the text.
+//
+func (b *Writer) endEscape() {
+	switch b.endChar {
+	case Escape:
+		b.updateWidth()
+		if b.flags&StripEscape == 0 {
+			b.cell.width -= 2 // don't count the Escape chars
+		}
+	case '>': // tag of zero width
+	case ';':
+		b.cell.width++ // entity, count as one rune
+	}
+	b.pos = len(b.buf)
+	b.endChar = 0
+}
+
+// Terminate the current cell by adding it to the list of cells of the
+// current line. Returns the number of cells in that line.
+//
+func (b *Writer) terminateCell(htab bool) int {
+	b.cell.htab = htab
+	line := &b.lines[len(b.lines)-1]
+	*line = append(*line, b.cell)
+	b.cell = cell{}
+	return len(*line)
+}
+
+func handlePanic(err *error, op string) {
+	if e := recover(); e != nil {
+		if nerr, ok := e.(osError); ok {
+			*err = nerr.err
+			return
+		}
+		panic("tabwriter: panic during " + op)
+	}
+}
+
+// RememberedWidths returns a copy of the remembered per-column maximum widths.
+// Requires use of the RememberWidths flag, and is not threadsafe.
+func (b *Writer) RememberedWidths() []int {
+	retval := make([]int, len(b.maxwidths))
+	copy(retval, b.maxwidths)
+	return retval
+}
+
+// SetRememberedWidths sets the remembered per-column maximum widths.
+// Requires use of the RememberWidths flag, and is not threadsafe.
+func (b *Writer) SetRememberedWidths(widths []int) *Writer {
+	b.maxwidths = make([]int, len(widths))
+	copy(b.maxwidths, widths)
+	return b
+}
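+
+// Illustrative sketch (not part of the upstream file): with the RememberWidths
+// flag set, the column widths measured by one writer can be copied to another
+// so that separately flushed tables stay aligned. The output destination and
+// the rows below are hypothetical.
+//
+//	w1 := NewWriter(os.Stdout, 0, 8, 2, ' ', RememberWidths)
+//	fmt.Fprintln(w1, "NAME\tSTATUS")
+//	w1.Flush()
+//
+//	w2 := NewWriter(os.Stdout, 0, 8, 2, ' ', RememberWidths)
+//	w2.SetRememberedWidths(w1.RememberedWidths())
+//	fmt.Fprintln(w2, "frontend\tRunning")
+//	w2.Flush()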
+
+// Flush should be called after the last call to Write to ensure
+// that any data buffered in the Writer is written to output. Any
+// incomplete escape sequence at the end is considered
+// complete for formatting purposes.
+func (b *Writer) Flush() error {
+	return b.flush()
+}
+
+func (b *Writer) flush() (err error) {
+	defer b.reset() // even in the presence of errors
+	defer handlePanic(&err, "Flush")
+
+	// add current cell if not empty
+	if b.cell.size > 0 {
+		if b.endChar != 0 {
+			// inside escape - terminate it even if incomplete
+			b.endEscape()
+		}
+		b.terminateCell(false)
+	}
+
+	// format contents of buffer
+	b.format(0, 0, len(b.lines))
+	return nil
+}
+
+var hbar = []byte("---\n")
+
+// Write writes buf to the writer b.
+// The only errors returned are ones encountered
+// while writing to the underlying output stream.
+//
+func (b *Writer) Write(buf []byte) (n int, err error) {
+	defer handlePanic(&err, "Write")
+
+	// split text into cells
+	n = 0
+	for i, ch := range buf {
+		if b.endChar == 0 {
+			// outside escape
+			switch ch {
+			case '\t', '\v', '\n', '\f':
+				// end of cell
+				b.append(buf[n:i])
+				b.updateWidth()
+				n = i + 1 // ch consumed
+				ncells := b.terminateCell(ch == '\t')
+				if ch == '\n' || ch == '\f' {
+					// terminate line
+					b.addLine(ch == '\f')
+					if ch == '\f' || ncells == 1 {
+						// A '\f' always forces a flush. Otherwise, if the previous
+						// line has only one cell, that cell has no impact on the
+						// formatting of the following lines (the last cell per
+						// line is ignored by format()), so we can flush the
+						// Writer contents.
+						if err = b.Flush(); err != nil {
+							return
+						}
+						if ch == '\f' && b.flags&Debug != 0 {
+							// indicate section break
+							b.write0(hbar)
+						}
+					}
+				}
+
+			case Escape:
+				// start of escaped sequence
+				b.append(buf[n:i])
+				b.updateWidth()
+				n = i
+				if b.flags&StripEscape != 0 {
+					n++ // strip Escape
+				}
+				b.startEscape(Escape)
+
+			case '<', '&':
+				// possibly an html tag/entity
+				if b.flags&FilterHTML != 0 {
+					// begin of tag/entity
+					b.append(buf[n:i])
+					b.updateWidth()
+					n = i
+					b.startEscape(ch)
+				}
+			}
+
+		} else {
+			// inside escape
+			if ch == b.endChar {
+				// end of tag/entity
+				j := i + 1
+				if ch == Escape && b.flags&StripEscape != 0 {
+					j = i // strip Escape
+				}
+				b.append(buf[n:j])
+				n = i + 1 // ch consumed
+				b.endEscape()
+			}
+		}
+	}
+
+	// append leftover text
+	b.append(buf[n:])
+	n = len(buf)
+	return
+}
+
+// NewWriter allocates and initializes a new tabwriter.Writer.
+// The parameters are the same as for the Init function.
+//
+func NewWriter(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer {
+	return new(Writer).Init(output, minwidth, tabwidth, padding, padchar, flags)
+}
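+
+// Illustrative sketch (not part of the upstream file): typical usage is to
+// write tab-separated cells and call Flush once at the end; the destination
+// and the rows below are hypothetical.
+//
+//	w := NewWriter(os.Stdout, 0, 8, 2, ' ', 0)
+//	fmt.Fprintln(w, "NAME\tREADY\tSTATUS")
+//	fmt.Fprintln(w, "web\t1/1\tRunning")
+//	w.Flush()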
diff --git a/vendor/github.com/mailru/easyjson/LICENSE b/vendor/github.com/mailru/easyjson/LICENSE
new file mode 100644
index 00000000..fbff658f
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/LICENSE
@@ -0,0 +1,7 @@
+Copyright (c) 2016 Mail.Ru Group
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/mailru/easyjson/buffer/pool.go b/vendor/github.com/mailru/easyjson/buffer/pool.go
new file mode 100644
index 00000000..07fb4bc1
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/buffer/pool.go
@@ -0,0 +1,270 @@
+// Package buffer implements a buffer for serialization, consisting of a chain of []byte-s to
+// reduce copying and to allow reuse of individual chunks.
+package buffer
+
+import (
+	"io"
+	"sync"
+)
+
+// PoolConfig contains configuration for the allocation and reuse strategy.
+type PoolConfig struct {
+	StartSize  int // Minimum chunk size that is allocated.
+	PooledSize int // Minimum chunk size that is reused; reusing chunks that are too small would add overhead.
+	MaxSize    int // Maximum chunk size that will be allocated.
+}
+
+var config = PoolConfig{
+	StartSize:  128,
+	PooledSize: 512,
+	MaxSize:    32768,
+}
+
+// Reuse pool: chunk size -> pool.
+var buffers = map[int]*sync.Pool{}
+
+func initBuffers() {
+	for l := config.PooledSize; l <= config.MaxSize; l *= 2 {
+		buffers[l] = new(sync.Pool)
+	}
+}
+
+func init() {
+	initBuffers()
+}
+
+// Init sets up a non-default pooling and allocation strategy. It should be called before any serialization is performed.
+func Init(cfg PoolConfig) {
+	config = cfg
+	initBuffers()
+}
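+
+// Illustrative sketch (not part of the upstream file): a caller that mostly
+// serializes large documents could tune the pool before any serialization is
+// performed; the sizes below are hypothetical, not recommendations.
+//
+//	buffer.Init(buffer.PoolConfig{
+//		StartSize:  256,
+//		PooledSize: 1024,
+//		MaxSize:    65536,
+//	})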
+
+// putBuf returns a chunk to the reuse pool if it is large enough to be reused.
+func putBuf(buf []byte) {
+	size := cap(buf)
+	if size < config.PooledSize {
+		return
+	}
+	if c := buffers[size]; c != nil {
+		c.Put(buf[:0])
+	}
+}
+
+// getBuf gets a chunk from the reuse pool or allocates a new one if reuse failed.
+func getBuf(size int) []byte {
+	if size < config.PooledSize {
+		return make([]byte, 0, size)
+	}
+
+	if c := buffers[size]; c != nil {
+		v := c.Get()
+		if v != nil {
+			return v.([]byte)
+		}
+	}
+	return make([]byte, 0, size)
+}
+
+// Buffer is a buffer optimized for serialization without extra copying.
+type Buffer struct {
+
+	// Buf is the current chunk that can be used for serialization.
+	Buf []byte
+
+	toPool []byte
+	bufs   [][]byte
+}
+
+// EnsureSpace makes sure that the current chunk contains at least s free bytes,
+// possibly creating a new chunk.
+func (b *Buffer) EnsureSpace(s int) {
+	if cap(b.Buf)-len(b.Buf) >= s {
+		return
+	}
+	l := len(b.Buf)
+	if l > 0 {
+		if cap(b.toPool) != cap(b.Buf) {
+			// Chunk was reallocated, toPool can be pooled.
+			putBuf(b.toPool)
+		}
+		if cap(b.bufs) == 0 {
+			b.bufs = make([][]byte, 0, 8)
+		}
+		b.bufs = append(b.bufs, b.Buf)
+		l = cap(b.toPool) * 2
+	} else {
+		l = config.StartSize
+	}
+
+	if l > config.MaxSize {
+		l = config.MaxSize
+	}
+	b.Buf = getBuf(l)
+	b.toPool = b.Buf
+}
+
+// AppendByte appends a single byte to buffer.
+func (b *Buffer) AppendByte(data byte) {
+	if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined.
+		b.EnsureSpace(1)
+	}
+	b.Buf = append(b.Buf, data)
+}
+
+// AppendBytes appends a byte slice to buffer.
+func (b *Buffer) AppendBytes(data []byte) {
+	for len(data) > 0 {
+		if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined.
+			b.EnsureSpace(1)
+		}
+
+		sz := cap(b.Buf) - len(b.Buf)
+		if sz > len(data) {
+			sz = len(data)
+		}
+
+		b.Buf = append(b.Buf, data[:sz]...)
+		data = data[sz:]
+	}
+}
+
+// AppendString appends a string to the buffer.
+func (b *Buffer) AppendString(data string) {
+	for len(data) > 0 {
+		if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined.
+			b.EnsureSpace(1)
+		}
+
+		sz := cap(b.Buf) - len(b.Buf)
+		if sz > len(data) {
+			sz = len(data)
+		}
+
+		b.Buf = append(b.Buf, data[:sz]...)
+		data = data[sz:]
+	}
+}
+
+// Size computes the size of a buffer by adding sizes of every chunk.
+func (b *Buffer) Size() int {
+	size := len(b.Buf)
+	for _, buf := range b.bufs {
+		size += len(buf)
+	}
+	return size
+}
+
+// DumpTo outputs the contents of a buffer to a writer and resets the buffer.
+func (b *Buffer) DumpTo(w io.Writer) (written int, err error) {
+	var n int
+	for _, buf := range b.bufs {
+		if err == nil {
+			n, err = w.Write(buf)
+			written += n
+		}
+		putBuf(buf)
+	}
+
+	if err == nil {
+		n, err = w.Write(b.Buf)
+		written += n
+	}
+	putBuf(b.toPool)
+
+	b.bufs = nil
+	b.Buf = nil
+	b.toPool = nil
+
+	return
+}
+
+// BuildBytes creates a single byte slice with all the contents of the buffer. Data is
+// copied if it does not fit in a single chunk. You can optionally provide one byte
+// slice as an argument for it to try to reuse.
+func (b *Buffer) BuildBytes(reuse ...[]byte) []byte {
+	if len(b.bufs) == 0 {
+		ret := b.Buf
+		b.toPool = nil
+		b.Buf = nil
+		return ret
+	}
+
+	var ret []byte
+	size := b.Size()
+
+	// If we got a buffer as an argument and it is big enough, reuse it.
+	if len(reuse) == 1 && cap(reuse[0]) >= size {
+		ret = reuse[0][:0]
+	} else {
+		ret = make([]byte, 0, size)
+	}
+	for _, buf := range b.bufs {
+		ret = append(ret, buf...)
+		putBuf(buf)
+	}
+
+	ret = append(ret, b.Buf...)
+	putBuf(b.toPool)
+
+	b.bufs = nil
+	b.toPool = nil
+	b.Buf = nil
+
+	return ret
+}
+
+type readCloser struct {
+	offset int
+	bufs   [][]byte
+}
+
+func (r *readCloser) Read(p []byte) (n int, err error) {
+	for _, buf := range r.bufs {
+		// Copy as much as we can.
+		x := copy(p[n:], buf[r.offset:])
+		n += x // Increment how much we filled.
+
+		// Did we empty the whole buffer?
+		if r.offset+x == len(buf) {
+			// On to the next buffer.
+			r.offset = 0
+			r.bufs = r.bufs[1:]
+
+			// We can release this buffer.
+			putBuf(buf)
+		} else {
+			r.offset += x
+		}
+
+		if n == len(p) {
+			break
+		}
+	}
+	// No buffers left or nothing read?
+	if len(r.bufs) == 0 {
+		err = io.EOF
+	}
+	return
+}
+
+func (r *readCloser) Close() error {
+	// Release all remaining buffers.
+	for _, buf := range r.bufs {
+		putBuf(buf)
+	}
+	// In case Close gets called multiple times.
+	r.bufs = nil
+
+	return nil
+}
+
+// ReadCloser creates an io.ReadCloser with all the contents of the buffer.
+func (b *Buffer) ReadCloser() io.ReadCloser {
+	ret := &readCloser{0, append(b.bufs, b.Buf)}
+
+	b.bufs = nil
+	b.toPool = nil
+	b.Buf = nil
+
+	return ret
+}
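+
+// Illustrative sketch (not part of the upstream file): typical Buffer usage is
+// to append data and then either build a single slice or dump everything to an
+// io.Writer; the JSON fragment below is hypothetical.
+//
+//	var b Buffer
+//	b.AppendString(`{"key":`)
+//	b.AppendString(`"value"}`)
+//	out := b.BuildBytes() // single slice; chunks are returned to the pool
+//	// or: n, err := b.DumpTo(os.Stdout)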
diff --git a/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go b/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go
new file mode 100644
index 00000000..ff7b27c5
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go
@@ -0,0 +1,24 @@
+// This file will only be included in the build if neither the
+// easyjson_nounsafe nor the appengine build tag is set. See README notes
+// for more details.
+
+//+build !easyjson_nounsafe
+//+build !appengine
+
+package jlexer
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+// bytesToStr creates a string pointing at the slice to avoid copying.
+//
+// Warning: the string returned by this function should be used with care, as the whole input data
+// chunk may be kept from being freed by the GC because of a single string, or conversely the
+// underlying data may be garbage-collected even while the string still exists.
+func bytesToStr(data []byte) string {
+	h := (*reflect.SliceHeader)(unsafe.Pointer(&data))
+	shdr := reflect.StringHeader{Data: h.Data, Len: h.Len}
+	return *(*string)(unsafe.Pointer(&shdr))
+}
diff --git a/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go b/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go
new file mode 100644
index 00000000..864d1be6
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go
@@ -0,0 +1,13 @@
+// This file is included in the build if any of the build tags below
+// are defined. Refer to the README notes for more details.
+
+//+build easyjson_nounsafe appengine
+
+package jlexer
+
+// bytesToStr creates a string normally from []byte
+//
+// Note that this method is roughly 1.5x slower than using the 'unsafe' method.
+func bytesToStr(data []byte) string {
+	return string(data)
+}
diff --git a/vendor/github.com/mailru/easyjson/jlexer/error.go b/vendor/github.com/mailru/easyjson/jlexer/error.go
new file mode 100644
index 00000000..e90ec40d
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/jlexer/error.go
@@ -0,0 +1,15 @@
+package jlexer
+
+import "fmt"
+
+// LexerError implements the error interface and represents all possible errors that can be
+// generated while parsing JSON data.
+type LexerError struct {
+	Reason string
+	Offset int
+	Data   string
+}
+
+func (l *LexerError) Error() string {
+	return fmt.Sprintf("parse error: %s near offset %d of '%s'", l.Reason, l.Offset, l.Data)
+}
diff --git a/vendor/github.com/mailru/easyjson/jlexer/lexer.go b/vendor/github.com/mailru/easyjson/jlexer/lexer.go
new file mode 100644
index 00000000..ddd376b8
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/jlexer/lexer.go
@@ -0,0 +1,1182 @@
+// Package jlexer contains a JSON lexer implementation.
+//
+// It is expected that it is mostly used with generated parser code, so the interface is tuned
+// for a parser that knows what kind of data is expected.
+package jlexer
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+	"unicode"
+	"unicode/utf16"
+	"unicode/utf8"
+)
+
+// tokenKind determines type of a token.
+type tokenKind byte
+
+const (
+	tokenUndef  tokenKind = iota // No token.
+	tokenDelim                   // Delimiter: one of '{', '}', '[' or ']'.
+	tokenString                  // A string literal, e.g. "abc\u1234"
+	tokenNumber                  // Number literal, e.g. 1.5e5
+	tokenBool                    // Boolean literal: true or false.
+	tokenNull                    // null keyword.
+)
+
+// token describes a single token: type, position in the input and value.
+type token struct {
+	kind tokenKind // Type of a token.
+
+	boolValue  bool   // Value if a boolean literal token.
+	byteValue  []byte // Raw value of a token.
+	delimValue byte
+}
+
+// Lexer is a JSON lexer: it iterates over JSON tokens in a byte slice.
+type Lexer struct {
+	Data []byte // Input data given to the lexer.
+
+	start int   // Start of the current token.
+	pos   int   // Current unscanned position in the input stream.
+	token token // Last scanned token, if token.kind != tokenUndef.
+
+	firstElement bool // Whether current element is the first in array or an object.
+	wantSep      byte // A comma or a colon character, which need to occur before a token.
+
+	UseMultipleErrors bool          // Whether to collect multiple non-fatal errors instead of stopping at the first one.
+	fatalError        error         // Fatal error occurred during lexing. It is usually a syntax error.
+	multipleErrors    []*LexerError // Semantic errors that occurred during lexing. Parsing continues after these errors are found.
+}
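+
+// Illustrative sketch (not part of the upstream file): generated decoders drive
+// the lexer roughly like this when reading a small object; the JSON input and
+// the field name are hypothetical.
+//
+//	in := Lexer{Data: []byte(`{"n":1}`)}
+//	in.Delim('{')
+//	key := in.UnsafeString() // "n"
+//	in.WantColon()
+//	n := in.Int() // 1
+//	in.WantComma()
+//	in.Delim('}')
+//	err := in.Error() // nil on success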
+
+// FetchToken scans the input for the next token.
+func (r *Lexer) FetchToken() {
+	r.token.kind = tokenUndef
+	r.start = r.pos
+
+	// Check that r.Data has an element at r.pos.
+	// If it doesn't, the input data is corrupted.
+	if len(r.Data) < r.pos {
+		r.errParse("Unexpected end of data")
+		return
+	}
+	// Determine the type of a token by skipping whitespace and reading the
+	// first character.
+	for _, c := range r.Data[r.pos:] {
+		switch c {
+		case ':', ',':
+			if r.wantSep == c {
+				r.pos++
+				r.start++
+				r.wantSep = 0
+			} else {
+				r.errSyntax()
+			}
+
+		case ' ', '\t', '\r', '\n':
+			r.pos++
+			r.start++
+
+		case '"':
+			if r.wantSep != 0 {
+				r.errSyntax()
+			}
+
+			r.token.kind = tokenString
+			r.fetchString()
+			return
+
+		case '{', '[':
+			if r.wantSep != 0 {
+				r.errSyntax()
+			}
+			r.firstElement = true
+			r.token.kind = tokenDelim
+			r.token.delimValue = r.Data[r.pos]
+			r.pos++
+			return
+
+		case '}', ']':
+			if !r.firstElement && (r.wantSep != ',') {
+				r.errSyntax()
+			}
+			r.wantSep = 0
+			r.token.kind = tokenDelim
+			r.token.delimValue = r.Data[r.pos]
+			r.pos++
+			return
+
+		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-':
+			if r.wantSep != 0 {
+				r.errSyntax()
+			}
+			r.token.kind = tokenNumber
+			r.fetchNumber()
+			return
+
+		case 'n':
+			if r.wantSep != 0 {
+				r.errSyntax()
+			}
+
+			r.token.kind = tokenNull
+			r.fetchNull()
+			return
+
+		case 't':
+			if r.wantSep != 0 {
+				r.errSyntax()
+			}
+
+			r.token.kind = tokenBool
+			r.token.boolValue = true
+			r.fetchTrue()
+			return
+
+		case 'f':
+			if r.wantSep != 0 {
+				r.errSyntax()
+			}
+
+			r.token.kind = tokenBool
+			r.token.boolValue = false
+			r.fetchFalse()
+			return
+
+		default:
+			r.errSyntax()
+			return
+		}
+	}
+	r.fatalError = io.EOF
+	return
+}
+
+// isTokenEnd returns true if the char can follow a non-delimiter token
+func isTokenEnd(c byte) bool {
+	return c == ' ' || c == '\t' || c == '\r' || c == '\n' || c == '[' || c == ']' || c == '{' || c == '}' || c == ',' || c == ':'
+}
+
+// fetchNull fetches and checks remaining bytes of null keyword.
+func (r *Lexer) fetchNull() {
+	r.pos += 4
+	if r.pos > len(r.Data) ||
+		r.Data[r.pos-3] != 'u' ||
+		r.Data[r.pos-2] != 'l' ||
+		r.Data[r.pos-1] != 'l' ||
+		(r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) {
+
+		r.pos -= 4
+		r.errSyntax()
+	}
+}
+
+// fetchTrue fetches and checks remaining bytes of true keyword.
+func (r *Lexer) fetchTrue() {
+	r.pos += 4
+	if r.pos > len(r.Data) ||
+		r.Data[r.pos-3] != 'r' ||
+		r.Data[r.pos-2] != 'u' ||
+		r.Data[r.pos-1] != 'e' ||
+		(r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) {
+
+		r.pos -= 4
+		r.errSyntax()
+	}
+}
+
+// fetchFalse fetches and checks remaining bytes of false keyword.
+func (r *Lexer) fetchFalse() {
+	r.pos += 5
+	if r.pos > len(r.Data) ||
+		r.Data[r.pos-4] != 'a' ||
+		r.Data[r.pos-3] != 'l' ||
+		r.Data[r.pos-2] != 's' ||
+		r.Data[r.pos-1] != 'e' ||
+		(r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) {
+
+		r.pos -= 5
+		r.errSyntax()
+	}
+}
+
+// fetchNumber scans a number literal token.
+func (r *Lexer) fetchNumber() {
+	hasE := false
+	afterE := false
+	hasDot := false
+
+	r.pos++
+	for i, c := range r.Data[r.pos:] {
+		switch {
+		case c >= '0' && c <= '9':
+			afterE = false
+		case c == '.' && !hasDot:
+			hasDot = true
+		case (c == 'e' || c == 'E') && !hasE:
+			hasE = true
+			hasDot = true
+			afterE = true
+		case (c == '+' || c == '-') && afterE:
+			afterE = false
+		default:
+			r.pos += i
+			if !isTokenEnd(c) {
+				r.errSyntax()
+			} else {
+				r.token.byteValue = r.Data[r.start:r.pos]
+			}
+			return
+		}
+	}
+
+	r.pos = len(r.Data)
+	r.token.byteValue = r.Data[r.start:]
+}
+
+// findStringLen scans the string literal for the ending quote char to determine the required size.
+// The size will be exact if no escapes are present and may be inexact if there are escaped chars.
+func findStringLen(data []byte) (isValid, hasEscapes bool, length int) {
+	delta := 0
+
+	for i := 0; i < len(data); i++ {
+		switch data[i] {
+		case '\\':
+			i++
+			delta++
+			if i < len(data) && data[i] == 'u' {
+				delta++
+			}
+		case '"':
+			return true, (delta > 0), (i - delta)
+		}
+	}
+
+	return false, false, len(data)
+}
+
+// getu4 decodes \uXXXX from the beginning of s, returning the hex value,
+// or -1 on failure.
+func getu4(s []byte) rune {
+	if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
+		return -1
+	}
+	var val rune
+	for i := 2; i < len(s) && i < 6; i++ {
+		var v byte
+		c := s[i]
+		switch c {
+		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+			v = c - '0'
+		case 'a', 'b', 'c', 'd', 'e', 'f':
+			v = c - 'a' + 10
+		case 'A', 'B', 'C', 'D', 'E', 'F':
+			v = c - 'A' + 10
+		default:
+			return -1
+		}
+
+		val <<= 4
+		val |= rune(v)
+	}
+	return val
+}
+
+// processEscape processes a single escape sequence and returns number of bytes processed.
+func (r *Lexer) processEscape(data []byte) (int, error) {
+	if len(data) < 2 {
+		return 0, fmt.Errorf("syntax error at %v", string(data))
+	}
+
+	c := data[1]
+	switch c {
+	case '"', '/', '\\':
+		r.token.byteValue = append(r.token.byteValue, c)
+		return 2, nil
+	case 'b':
+		r.token.byteValue = append(r.token.byteValue, '\b')
+		return 2, nil
+	case 'f':
+		r.token.byteValue = append(r.token.byteValue, '\f')
+		return 2, nil
+	case 'n':
+		r.token.byteValue = append(r.token.byteValue, '\n')
+		return 2, nil
+	case 'r':
+		r.token.byteValue = append(r.token.byteValue, '\r')
+		return 2, nil
+	case 't':
+		r.token.byteValue = append(r.token.byteValue, '\t')
+		return 2, nil
+	case 'u':
+		rr := getu4(data)
+		if rr < 0 {
+			return 0, errors.New("syntax error")
+		}
+
+		read := 6
+		if utf16.IsSurrogate(rr) {
+			rr1 := getu4(data[read:])
+			if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
+				read += 6
+				rr = dec
+			} else {
+				rr = unicode.ReplacementChar
+			}
+		}
+		var d [4]byte
+		s := utf8.EncodeRune(d[:], rr)
+		r.token.byteValue = append(r.token.byteValue, d[:s]...)
+		return read, nil
+	}
+
+	return 0, errors.New("syntax error")
+}
+
+// fetchString scans a string literal token.
+func (r *Lexer) fetchString() {
+	r.pos++
+	data := r.Data[r.pos:]
+
+	isValid, hasEscapes, length := findStringLen(data)
+	if !isValid {
+		r.pos += length
+		r.errParse("unterminated string literal")
+		return
+	}
+	if !hasEscapes {
+		r.token.byteValue = data[:length]
+		r.pos += length + 1
+		return
+	}
+
+	r.token.byteValue = make([]byte, 0, length)
+	p := 0
+	for i := 0; i < len(data); {
+		switch data[i] {
+		case '"':
+			r.pos += i + 1
+			r.token.byteValue = append(r.token.byteValue, data[p:i]...)
+			i++
+			return
+
+		case '\\':
+			r.token.byteValue = append(r.token.byteValue, data[p:i]...)
+			off, err := r.processEscape(data[i:])
+			if err != nil {
+				r.errParse(err.Error())
+				return
+			}
+			i += off
+			p = i
+
+		default:
+			i++
+		}
+	}
+	r.errParse("unterminated string literal")
+}
+
+// scanToken scans the next token if no token is currently available in the lexer.
+func (r *Lexer) scanToken() {
+	if r.token.kind != tokenUndef || r.fatalError != nil {
+		return
+	}
+
+	r.FetchToken()
+}
+
+// consume resets the current token to allow scanning the next one.
+func (r *Lexer) consume() {
+	r.token.kind = tokenUndef
+	r.token.delimValue = 0
+}
+
+// Ok returns true if no error (including io.EOF) was encountered during scanning.
+func (r *Lexer) Ok() bool {
+	return r.fatalError == nil
+}
+
+const maxErrorContextLen = 13
+
+func (r *Lexer) errParse(what string) {
+	if r.fatalError == nil {
+		var str string
+		if len(r.Data)-r.pos <= maxErrorContextLen {
+			str = string(r.Data)
+		} else {
+			str = string(r.Data[r.pos:r.pos+maxErrorContextLen-3]) + "..."
+		}
+		r.fatalError = &LexerError{
+			Reason: what,
+			Offset: r.pos,
+			Data:   str,
+		}
+	}
+}
+
+func (r *Lexer) errSyntax() {
+	r.errParse("syntax error")
+}
+
+func (r *Lexer) errInvalidToken(expected string) {
+	if r.fatalError != nil {
+		return
+	}
+	if r.UseMultipleErrors {
+		r.pos = r.start
+		r.consume()
+		r.SkipRecursive()
+		switch expected {
+		case "[":
+			r.token.delimValue = ']'
+			r.token.kind = tokenDelim
+		case "{":
+			r.token.delimValue = '}'
+			r.token.kind = tokenDelim
+		}
+		r.addNonfatalError(&LexerError{
+			Reason: fmt.Sprintf("expected %s", expected),
+			Offset: r.start,
+			Data:   string(r.Data[r.start:r.pos]),
+		})
+		return
+	}
+
+	var str string
+	if len(r.token.byteValue) <= maxErrorContextLen {
+		str = string(r.token.byteValue)
+	} else {
+		str = string(r.token.byteValue[:maxErrorContextLen-3]) + "..."
+	}
+	r.fatalError = &LexerError{
+		Reason: fmt.Sprintf("expected %s", expected),
+		Offset: r.pos,
+		Data:   str,
+	}
+}
+
+func (r *Lexer) GetPos() int {
+	return r.pos
+}
+
+// Delim consumes a token and verifies that it is the given delimiter.
+func (r *Lexer) Delim(c byte) {
+	if r.token.kind == tokenUndef && r.Ok() {
+		r.FetchToken()
+	}
+
+	if !r.Ok() || r.token.delimValue != c {
+		r.consume() // errInvalidToken can change token if UseMultipleErrors is enabled.
+		r.errInvalidToken(string([]byte{c}))
+	} else {
+		r.consume()
+	}
+}
+
+// IsDelim returns true if the next token is the given delimiter, or if a scanning error has already occurred.
+func (r *Lexer) IsDelim(c byte) bool {
+	if r.token.kind == tokenUndef && r.Ok() {
+		r.FetchToken()
+	}
+	return !r.Ok() || r.token.delimValue == c
+}
+
+// Null verifies that the next token is null and consumes it.
+func (r *Lexer) Null() {
+	if r.token.kind == tokenUndef && r.Ok() {
+		r.FetchToken()
+	}
+	if !r.Ok() || r.token.kind != tokenNull {
+		r.errInvalidToken("null")
+	}
+	r.consume()
+}
+
+// IsNull returns true if the next token is a null keyword.
+func (r *Lexer) IsNull() bool {
+	if r.token.kind == tokenUndef && r.Ok() {
+		r.FetchToken()
+	}
+	return r.Ok() && r.token.kind == tokenNull
+}
+
+// Skip skips a single token.
+func (r *Lexer) Skip() {
+	if r.token.kind == tokenUndef && r.Ok() {
+		r.FetchToken()
+	}
+	r.consume()
+}
+
+// SkipRecursive skips next array or object completely, or just skips a single token if not
+// an array/object.
+//
+// Note: no syntax validation is performed on the skipped data.
+func (r *Lexer) SkipRecursive() {
+	r.scanToken()
+	var start, end byte
+
+	switch r.token.delimValue {
+	case '{':
+		start, end = '{', '}'
+	case '[':
+		start, end = '[', ']'
+	default:
+		r.consume()
+		return
+	}
+
+	r.consume()
+
+	level := 1
+	inQuotes := false
+	wasEscape := false
+
+	for i, c := range r.Data[r.pos:] {
+		switch {
+		case c == start && !inQuotes:
+			level++
+		case c == end && !inQuotes:
+			level--
+			if level == 0 {
+				r.pos += i + 1
+				return
+			}
+		case c == '\\' && inQuotes:
+			wasEscape = !wasEscape
+			continue
+		case c == '"' && inQuotes:
+			inQuotes = wasEscape
+		case c == '"':
+			inQuotes = true
+		}
+		wasEscape = false
+	}
+	r.pos = len(r.Data)
+	r.fatalError = &LexerError{
+		Reason: "EOF reached while skipping array/object or token",
+		Offset: r.pos,
+		Data:   string(r.Data[r.pos:]),
+	}
+}
+
+// Raw fetches the next item recursively as a data slice
+func (r *Lexer) Raw() []byte {
+	r.SkipRecursive()
+	if !r.Ok() {
+		return nil
+	}
+	return r.Data[r.start:r.pos]
+}
+
+// IsStart returns whether the lexer is positioned at the start
+// of an input string.
+func (r *Lexer) IsStart() bool {
+	return r.pos == 0
+}
+
+// Consumed reads all remaining bytes from the input, publishing an error if
+// there is anything but whitespace remaining.
+func (r *Lexer) Consumed() {
+	if r.pos > len(r.Data) || !r.Ok() {
+		return
+	}
+
+	for _, c := range r.Data[r.pos:] {
+		if c != ' ' && c != '\t' && c != '\r' && c != '\n' {
+			r.AddError(&LexerError{
+				Reason: "invalid character '" + string(c) + "' after top-level value",
+				Offset: r.pos,
+				Data:   string(r.Data[r.pos:]),
+			})
+			return
+		}
+
+		r.pos++
+		r.start++
+	}
+}
+
+func (r *Lexer) unsafeString() (string, []byte) {
+	if r.token.kind == tokenUndef && r.Ok() {
+		r.FetchToken()
+	}
+	if !r.Ok() || r.token.kind != tokenString {
+		r.errInvalidToken("string")
+		return "", nil
+	}
+	bytes := r.token.byteValue
+	ret := bytesToStr(r.token.byteValue)
+	r.consume()
+	return ret, bytes
+}
+
+// UnsafeString returns the string value if the token is a string literal.
+//
+// Warning: returned string may point to the input buffer, so the string should not outlive
+// the input buffer. Intended pattern of usage is as an argument to a switch statement.
+func (r *Lexer) UnsafeString() string {
+	ret, _ := r.unsafeString()
+	return ret
+}
+
+// UnsafeBytes returns the byte slice if the token is a string literal.
+func (r *Lexer) UnsafeBytes() []byte {
+	_, ret := r.unsafeString()
+	return ret
+}
+
+// String reads a string literal.
+func (r *Lexer) String() string {
+	if r.token.kind == tokenUndef && r.Ok() {
+		r.FetchToken()
+	}
+	if !r.Ok() || r.token.kind != tokenString {
+		r.errInvalidToken("string")
+		return ""
+	}
+	ret := string(r.token.byteValue)
+	r.consume()
+	return ret
+}
+
+// Bytes reads a string literal and base64 decodes it into a byte slice.
+func (r *Lexer) Bytes() []byte {
+	if r.token.kind == tokenUndef && r.Ok() {
+		r.FetchToken()
+	}
+	if !r.Ok() || r.token.kind != tokenString {
+		r.errInvalidToken("string")
+		return nil
+	}
+	ret := make([]byte, base64.StdEncoding.DecodedLen(len(r.token.byteValue)))
+	n, err := base64.StdEncoding.Decode(ret, r.token.byteValue)
+	if err != nil {
+		r.fatalError = &LexerError{
+			Reason: err.Error(),
+		}
+		return nil
+	}
+
+	r.consume()
+	return ret[:n]
+}
+
+// Bool reads a true or false boolean keyword.
+func (r *Lexer) Bool() bool {
+	if r.token.kind == tokenUndef && r.Ok() {
+		r.FetchToken()
+	}
+	if !r.Ok() || r.token.kind != tokenBool {
+		r.errInvalidToken("bool")
+		return false
+	}
+	ret := r.token.boolValue
+	r.consume()
+	return ret
+}
+
+func (r *Lexer) number() string {
+	if r.token.kind == tokenUndef && r.Ok() {
+		r.FetchToken()
+	}
+	if !r.Ok() || r.token.kind != tokenNumber {
+		r.errInvalidToken("number")
+		return ""
+	}
+	ret := bytesToStr(r.token.byteValue)
+	r.consume()
+	return ret
+}
+
+func (r *Lexer) Uint8() uint8 {
+	s := r.number()
+	if !r.Ok() {
+		return 0
+	}
+
+	n, err := strconv.ParseUint(s, 10, 8)
+	if err != nil {
+		r.addNonfatalError(&LexerError{
+			Offset: r.start,
+			Reason: err.Error(),
+			Data:   s,
+		})
+	}
+	return uint8(n)
+}
+
+func (r *Lexer) Uint16() uint16 {
+	s := r.number()
+	if !r.Ok() {
+		return 0
+	}
+
+	n, err := strconv.ParseUint(s, 10, 16)
+	if err != nil {
+		r.addNonfatalError(&LexerError{
+			Offset: r.start,
+			Reason: err.Error(),
+			Data:   s,
+		})
+	}
+	return uint16(n)
+}
+
+func (r *Lexer) Uint32() uint32 {
+	s := r.number()
+	if !r.Ok() {
+		return 0
+	}
+
+	n, err := strconv.ParseUint(s, 10, 32)
+	if err != nil {
+		r.addNonfatalError(&LexerError{
+			Offset: r.start,
+			Reason: err.Error(),
+			Data:   s,
+		})
+	}
+	return uint32(n)
+}
+
+func (r *Lexer) Uint64() uint64 {
+	s := r.number()
+	if !r.Ok() {
+		return 0
+	}
+
+	n, err := strconv.ParseUint(s, 10, 64)
+	if err != nil {
+		r.addNonfatalError(&LexerError{
+			Offset: r.start,
+			Reason: err.Error(),
+			Data:   s,
+		})
+	}
+	return n
+}
+
+func (r *Lexer) Uint() uint {
+	return uint(r.Uint64())
+}
+
+func (r *Lexer) Int8() int8 {
+	s := r.number()
+	if !r.Ok() {
+		return 0
+	}
+
+	n, err := strconv.ParseInt(s, 10, 8)
+	if err != nil {
+		r.addNonfatalError(&LexerError{
+			Offset: r.start,
+			Reason: err.Error(),
+			Data:   s,
+		})
+	}
+	return int8(n)
+}
+
+func (r *Lexer) Int16() int16 {
+	s := r.number()
+	if !r.Ok() {
+		return 0
+	}
+
+	n, err := strconv.ParseInt(s, 10, 16)
+	if err != nil {
+		r.addNonfatalError(&LexerError{
+			Offset: r.start,
+			Reason: err.Error(),
+			Data:   s,
+		})
+	}
+	return int16(n)
+}
+
+func (r *Lexer) Int32() int32 {
+	s := r.number()
+	if !r.Ok() {
+		return 0
+	}
+
+	n, err := strconv.ParseInt(s, 10, 32)
+	if err != nil {
+		r.addNonfatalError(&LexerError{
+			Offset: r.start,
+			Reason: err.Error(),
+			Data:   s,
+		})
+	}
+	return int32(n)
+}
+
+func (r *Lexer) Int64() int64 {
+	s := r.number()
+	if !r.Ok() {
+		return 0
+	}
+
+	n, err := strconv.ParseInt(s, 10, 64)
+	if err != nil {
+		r.addNonfatalError(&LexerError{
+			Offset: r.start,
+			Reason: err.Error(),
+			Data:   s,
+		})
+	}
+	return n
+}
+
+func (r *Lexer) Int() int {
+	return int(r.Int64())
+}
+
+func (r *Lexer) Uint8Str() uint8 {
+	s, b := r.unsafeString()
+	if !r.Ok() {
+		return 0
+	}
+
+	n, err := strconv.ParseUint(s, 10, 8)
+	if err != nil {
+		r.addNonfatalError(&LexerError{
+			Offset: r.start,
+			Reason: err.Error(),
+			Data:   string(b),
+		})
+	}
+	return uint8(n)
+}
+
+func (r *Lexer) Uint16Str() uint16 {
+	s, b := r.unsafeString()
+	if !r.Ok() {
+		return 0
+	}
+
+	n, err := strconv.ParseUint(s, 10, 16)
+	if err != nil {
+		r.addNonfatalError(&LexerError{
+			Offset: r.start,
+			Reason: err.Error(),
+			Data:   string(b),
+		})
+	}
+	return uint16(n)
+}
+
+func (r *Lexer) Uint32Str() uint32 {
+	s, b := r.unsafeString()
+	if !r.Ok() {
+		return 0
+	}
+
+	n, err := strconv.ParseUint(s, 10, 32)
+	if err != nil {
+		r.addNonfatalError(&LexerError{
+			Offset: r.start,
+			Reason: err.Error(),
+			Data:   string(b),
+		})
+	}
+	return uint32(n)
+}
+
+func (r *Lexer) Uint64Str() uint64 {
+	s, b := r.unsafeString()
+	if !r.Ok() {
+		return 0
+	}
+
+	n, err := strconv.ParseUint(s, 10, 64)
+	if err != nil {
+		r.addNonfatalError(&LexerError{
+			Offset: r.start,
+			Reason: err.Error(),
+			Data:   string(b),
+		})
+	}
+	return n
+}
+
+func (r *Lexer) UintStr() uint {
+	return uint(r.Uint64Str())
+}
+
+func (r *Lexer) UintptrStr() uintptr {
+	return uintptr(r.Uint64Str())
+}
+
+func (r *Lexer) Int8Str() int8 {
+	s, b := r.unsafeString()
+	if !r.Ok() {
+		return 0
+	}
+
+	n, err := strconv.ParseInt(s, 10, 8)
+	if err != nil {
+		r.addNonfatalError(&LexerError{
+			Offset: r.start,
+			Reason: err.Error(),
+			Data:   string(b),
+		})
+	}
+	return int8(n)
+}
+
+func (r *Lexer) Int16Str() int16 {
+	s, b := r.unsafeString()
+	if !r.Ok() {
+		return 0
+	}
+
+	n, err := strconv.ParseInt(s, 10, 16)
+	if err != nil {
+		r.addNonfatalError(&LexerError{
+			Offset: r.start,
+			Reason: err.Error(),
+			Data:   string(b),
+		})
+	}
+	return int16(n)
+}
+
+func (r *Lexer) Int32Str() int32 {
+	s, b := r.unsafeString()
+	if !r.Ok() {
+		return 0
+	}
+
+	n, err := strconv.ParseInt(s, 10, 32)
+	if err != nil {
+		r.addNonfatalError(&LexerError{
+			Offset: r.start,
+			Reason: err.Error(),
+			Data:   string(b),
+		})
+	}
+	return int32(n)
+}
+
+func (r *Lexer) Int64Str() int64 {
+	s, b := r.unsafeString()
+	if !r.Ok() {
+		return 0
+	}
+
+	n, err := strconv.ParseInt(s, 10, 64)
+	if err != nil {
+		r.addNonfatalError(&LexerError{
+			Offset: r.start,
+			Reason: err.Error(),
+			Data:   string(b),
+		})
+	}
+	return n
+}
+
+func (r *Lexer) IntStr() int {
+	return int(r.Int64Str())
+}
+
+func (r *Lexer) Float32() float32 {
+	s := r.number()
+	if !r.Ok() {
+		return 0
+	}
+
+	n, err := strconv.ParseFloat(s, 32)
+	if err != nil {
+		r.addNonfatalError(&LexerError{
+			Offset: r.start,
+			Reason: err.Error(),
+			Data:   s,
+		})
+	}
+	return float32(n)
+}
+
+func (r *Lexer) Float32Str() float32 {
+	s, b := r.unsafeString()
+	if !r.Ok() {
+		return 0
+	}
+	n, err := strconv.ParseFloat(s, 32)
+	if err != nil {
+		r.addNonfatalError(&LexerError{
+			Offset: r.start,
+			Reason: err.Error(),
+			Data:   string(b),
+		})
+	}
+	return float32(n)
+}
+
+func (r *Lexer) Float64() float64 {
+	s := r.number()
+	if !r.Ok() {
+		return 0
+	}
+
+	n, err := strconv.ParseFloat(s, 64)
+	if err != nil {
+		r.addNonfatalError(&LexerError{
+			Offset: r.start,
+			Reason: err.Error(),
+			Data:   s,
+		})
+	}
+	return n
+}
+
+func (r *Lexer) Float64Str() float64 {
+	s, b := r.unsafeString()
+	if !r.Ok() {
+		return 0
+	}
+	n, err := strconv.ParseFloat(s, 64)
+	if err != nil {
+		r.addNonfatalError(&LexerError{
+			Offset: r.start,
+			Reason: err.Error(),
+			Data:   string(b),
+		})
+	}
+	return n
+}
+
+func (r *Lexer) Error() error {
+	return r.fatalError
+}
+
+func (r *Lexer) AddError(e error) {
+	if r.fatalError == nil {
+		r.fatalError = e
+	}
+}
+
+func (r *Lexer) AddNonFatalError(e error) {
+	r.addNonfatalError(&LexerError{
+		Offset: r.start,
+		Data:   string(r.Data[r.start:r.pos]),
+		Reason: e.Error(),
+	})
+}
+
+func (r *Lexer) addNonfatalError(err *LexerError) {
+	if r.UseMultipleErrors {
+		// We don't want to add errors with the same offset.
+		if len(r.multipleErrors) != 0 && r.multipleErrors[len(r.multipleErrors)-1].Offset == err.Offset {
+			return
+		}
+		r.multipleErrors = append(r.multipleErrors, err)
+		return
+	}
+	r.fatalError = err
+}
+
+func (r *Lexer) GetNonFatalErrors() []*LexerError {
+	return r.multipleErrors
+}
+
+// JsonNumber fetches a json.Number from the 'encoding/json' package.
+// Ints, floats, and strings containing them are all valid values.
+func (r *Lexer) JsonNumber() json.Number {
+	if r.token.kind == tokenUndef && r.Ok() {
+		r.FetchToken()
+	}
+	if !r.Ok() {
+		r.errInvalidToken("json.Number")
+		return json.Number("")
+	}
+
+	switch r.token.kind {
+	case tokenString:
+		return json.Number(r.String())
+	case tokenNumber:
+		return json.Number(r.Raw())
+	case tokenNull:
+		r.Null()
+		return json.Number("")
+	default:
+		r.errSyntax()
+		return json.Number("")
+	}
+}
+
+// Interface fetches an interface{} analogous to the 'encoding/json' package.
+func (r *Lexer) Interface() interface{} {
+	if r.token.kind == tokenUndef && r.Ok() {
+		r.FetchToken()
+	}
+
+	if !r.Ok() {
+		return nil
+	}
+	switch r.token.kind {
+	case tokenString:
+		return r.String()
+	case tokenNumber:
+		return r.Float64()
+	case tokenBool:
+		return r.Bool()
+	case tokenNull:
+		r.Null()
+		return nil
+	}
+
+	if r.token.delimValue == '{' {
+		r.consume()
+
+		ret := map[string]interface{}{}
+		for !r.IsDelim('}') {
+			key := r.String()
+			r.WantColon()
+			ret[key] = r.Interface()
+			r.WantComma()
+		}
+		r.Delim('}')
+
+		if r.Ok() {
+			return ret
+		} else {
+			return nil
+		}
+	} else if r.token.delimValue == '[' {
+		r.consume()
+
+		ret := []interface{}{}
+		for !r.IsDelim(']') {
+			ret = append(ret, r.Interface())
+			r.WantComma()
+		}
+		r.Delim(']')
+
+		if r.Ok() {
+			return ret
+		} else {
+			return nil
+		}
+	}
+	r.errSyntax()
+	return nil
+}
+
+// WantComma requires a comma to be present before fetching next token.
+func (r *Lexer) WantComma() {
+	r.wantSep = ','
+	r.firstElement = false
+}
+
+// WantColon requires a colon to be present before fetching next token.
+func (r *Lexer) WantColon() {
+	r.wantSep = ':'
+	r.firstElement = false
+}
diff --git a/vendor/github.com/mailru/easyjson/jwriter/writer.go b/vendor/github.com/mailru/easyjson/jwriter/writer.go
new file mode 100644
index 00000000..b9ed7cca
--- /dev/null
+++ b/vendor/github.com/mailru/easyjson/jwriter/writer.go
@@ -0,0 +1,390 @@
+// Package jwriter contains a JSON writer.
+package jwriter
+
+import (
+	"io"
+	"strconv"
+	"unicode/utf8"
+
+	"github.com/mailru/easyjson/buffer"
+)
+
+// Flags describe various encoding options. The behavior may actually be implemented in the encoder, but
+// the Flags field in Writer is used to set and pass them around.
+type Flags int
+
+const (
+	NilMapAsEmpty   Flags = 1 << iota // Encode nil map as '{}' rather than 'null'.
+	NilSliceAsEmpty                   // Encode nil slice as '[]' rather than 'null'.
+)
+
+// Writer is a JSON writer.
+type Writer struct {
+	Flags Flags
+
+	Error        error
+	Buffer       buffer.Buffer
+	NoEscapeHTML bool
+}
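+
+// Illustrative sketch (not part of the upstream file): generated encoders
+// append tokens to the Writer and then extract the result; the object below
+// is hypothetical.
+//
+//	var w Writer
+//	w.RawByte('{')
+//	w.String("count")
+//	w.RawByte(':')
+//	w.Int(3)
+//	w.RawByte('}')
+//	data, err := w.BuildBytes() // `{"count":3}` on success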
+
+// Size returns the size of the data that was written out.
+func (w *Writer) Size() int {
+	return w.Buffer.Size()
+}
+
+// DumpTo outputs the data to given io.Writer, resetting the buffer.
+func (w *Writer) DumpTo(out io.Writer) (written int, err error) {
+	return w.Buffer.DumpTo(out)
+}
+
+// BuildBytes returns writer data as a single byte slice. You can optionally provide one byte slice
+// as argument that it will try to reuse.
+func (w *Writer) BuildBytes(reuse ...[]byte) ([]byte, error) {
+	if w.Error != nil {
+		return nil, w.Error
+	}
+
+	return w.Buffer.BuildBytes(reuse...), nil
+}
+
+// ReadCloser returns an io.ReadCloser that can be used to read the data.
+// ReadCloser also resets the buffer.
+func (w *Writer) ReadCloser() (io.ReadCloser, error) {
+	if w.Error != nil {
+		return nil, w.Error
+	}
+
+	return w.Buffer.ReadCloser(), nil
+}
+
+// RawByte appends raw binary data to the buffer.
+func (w *Writer) RawByte(c byte) {
+	w.Buffer.AppendByte(c)
+}
+
+// RawString appends a string to the buffer.
+func (w *Writer) RawString(s string) {
+	w.Buffer.AppendString(s)
+}
+
+// Raw appends raw binary data to the buffer or sets the error if it is given. Useful for
+// calling with results of MarshalJSON-like functions.
+func (w *Writer) Raw(data []byte, err error) {
+	switch {
+	case w.Error != nil:
+		return
+	case err != nil:
+		w.Error = err
+	case len(data) > 0:
+		w.Buffer.AppendBytes(data)
+	default:
+		w.RawString("null")
+	}
+}
+
+// RawText encloses raw binary data in quotes and appends it to the buffer.
+// Useful for calling with results of MarshalText-like functions.
+func (w *Writer) RawText(data []byte, err error) {
+	switch {
+	case w.Error != nil:
+		return
+	case err != nil:
+		w.Error = err
+	case len(data) > 0:
+		w.String(string(data))
+	default:
+		w.RawString("null")
+	}
+}
+
+// Base64Bytes appends data to the buffer after base64 encoding it
+func (w *Writer) Base64Bytes(data []byte) {
+	if data == nil {
+		w.Buffer.AppendString("null")
+		return
+	}
+	w.Buffer.AppendByte('"')
+	w.base64(data)
+	w.Buffer.AppendByte('"')
+}
+
+func (w *Writer) Uint8(n uint8) {
+	w.Buffer.EnsureSpace(3)
+	w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+}
+
+func (w *Writer) Uint16(n uint16) {
+	w.Buffer.EnsureSpace(5)
+	w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+}
+
+func (w *Writer) Uint32(n uint32) {
+	w.Buffer.EnsureSpace(10)
+	w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+}
+
+func (w *Writer) Uint(n uint) {
+	w.Buffer.EnsureSpace(20)
+	w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+}
+
+func (w *Writer) Uint64(n uint64) {
+	w.Buffer.EnsureSpace(20)
+	w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10)
+}
+
+func (w *Writer) Int8(n int8) {
+	w.Buffer.EnsureSpace(4)
+	w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
+}
+
+func (w *Writer) Int16(n int16) {
+	w.Buffer.EnsureSpace(6)
+	w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
+}
+
+func (w *Writer) Int32(n int32) {
+	w.Buffer.EnsureSpace(11)
+	w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
+}
+
+func (w *Writer) Int(n int) {
+	w.Buffer.EnsureSpace(21)
+	w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
+}
+
+func (w *Writer) Int64(n int64) {
+	w.Buffer.EnsureSpace(21)
+	w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10)
+}
+
+func (w *Writer) Uint8Str(n uint8) {
+	w.Buffer.EnsureSpace(3)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+	w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) Uint16Str(n uint16) {
+	w.Buffer.EnsureSpace(5)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+	w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) Uint32Str(n uint32) {
+	w.Buffer.EnsureSpace(10)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+	w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) UintStr(n uint) {
+	w.Buffer.EnsureSpace(20)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+	w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) Uint64Str(n uint64) {
+	w.Buffer.EnsureSpace(20)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+	w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) UintptrStr(n uintptr) {
+	w.Buffer.EnsureSpace(20)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+	w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) Int8Str(n int8) {
+	w.Buffer.EnsureSpace(4)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+	w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) Int16Str(n int16) {
+	w.Buffer.EnsureSpace(6)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+	w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) Int32Str(n int32) {
+	w.Buffer.EnsureSpace(11)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+	w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) IntStr(n int) {
+	w.Buffer.EnsureSpace(21)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+	w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) Int64Str(n int64) {
+	w.Buffer.EnsureSpace(21)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+	w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) Float32(n float32) {
+	w.Buffer.EnsureSpace(20)
+	w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32)
+}
+
+func (w *Writer) Float32Str(n float32) {
+	w.Buffer.EnsureSpace(20)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+	w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) Float64(n float64) {
+	w.Buffer.EnsureSpace(20)
+	w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, n, 'g', -1, 64)
+}
+
+func (w *Writer) Float64Str(n float64) {
+	w.Buffer.EnsureSpace(20)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+	w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 64)
+	w.Buffer.Buf = append(w.Buffer.Buf, '"')
+}
+
+func (w *Writer) Bool(v bool) {
+	w.Buffer.EnsureSpace(5)
+	if v {
+		w.Buffer.Buf = append(w.Buffer.Buf, "true"...)
+	} else {
+		w.Buffer.Buf = append(w.Buffer.Buf, "false"...)
+	}
+}
+
+const chars = "0123456789abcdef"
+
+func isNotEscapedSingleChar(c byte, escapeHTML bool) bool {
+	// Note: might make sense to use a table if there are more chars to escape. With 4 chars
+	// it benchmarks the same.
+	if escapeHTML {
+		return c != '<' && c != '>' && c != '&' && c != '\\' && c != '"' && c >= 0x20 && c < utf8.RuneSelf
+	} else {
+		return c != '\\' && c != '"' && c >= 0x20 && c < utf8.RuneSelf
+	}
+}
+
+func (w *Writer) String(s string) {
+	w.Buffer.AppendByte('"')
+
+	// Portions of the string that contain no escapes are appended as
+	// byte slices.
+
+	p := 0 // last non-escape symbol
+
+	for i := 0; i < len(s); {
+		c := s[i]
+
+		if isNotEscapedSingleChar(c, !w.NoEscapeHTML) {
+			// single-width character, no escaping is required
+			i++
+			continue
+		} else if c < utf8.RuneSelf {
+			// single-width character, needs escaping
+			w.Buffer.AppendString(s[p:i])
+			switch c {
+			case '\t':
+				w.Buffer.AppendString(`\t`)
+			case '\r':
+				w.Buffer.AppendString(`\r`)
+			case '\n':
+				w.Buffer.AppendString(`\n`)
+			case '\\':
+				w.Buffer.AppendString(`\\`)
+			case '"':
+				w.Buffer.AppendString(`\"`)
+			default:
+				w.Buffer.AppendString(`\u00`)
+				w.Buffer.AppendByte(chars[c>>4])
+				w.Buffer.AppendByte(chars[c&0xf])
+			}
+
+			i++
+			p = i
+			continue
+		}
+
+		// broken UTF-8 sequence
+		runeValue, runeWidth := utf8.DecodeRuneInString(s[i:])
+		if runeValue == utf8.RuneError && runeWidth == 1 {
+			w.Buffer.AppendString(s[p:i])
+			w.Buffer.AppendString(`\ufffd`)
+			i++
+			p = i
+			continue
+		}
+
+		// JSONP-unsafe runes: line separator and paragraph separator
+		if runeValue == '\u2028' || runeValue == '\u2029' {
+			w.Buffer.AppendString(s[p:i])
+			w.Buffer.AppendString(`\u202`)
+			w.Buffer.AppendByte(chars[runeValue&0xf])
+			i += runeWidth
+			p = i
+			continue
+		}
+		i += runeWidth
+	}
+	w.Buffer.AppendString(s[p:])
+	w.Buffer.AppendByte('"')
+}
+
+const encode = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
+const padChar = '='
+
+func (w *Writer) base64(in []byte) {
+
+	if len(in) == 0 {
+		return
+	}
+
+	w.Buffer.EnsureSpace(((len(in)-1)/3 + 1) * 4)
+
+	si := 0
+	n := (len(in) / 3) * 3
+
+	for si < n {
+		// Convert 3x 8bit source bytes into 4 bytes
+		val := uint(in[si+0])<<16 | uint(in[si+1])<<8 | uint(in[si+2])
+
+		w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>18&0x3F], encode[val>>12&0x3F], encode[val>>6&0x3F], encode[val&0x3F])
+
+		si += 3
+	}
+
+	remain := len(in) - si
+	if remain == 0 {
+		return
+	}
+
+	// Add the remaining small block
+	val := uint(in[si+0]) << 16
+	if remain == 2 {
+		val |= uint(in[si+1]) << 8
+	}
+
+	w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>18&0x3F], encode[val>>12&0x3F])
+
+	switch remain {
+	case 2:
+		w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>6&0x3F], byte(padChar))
+	case 1:
+		w.Buffer.Buf = append(w.Buffer.Buf, byte(padChar), byte(padChar))
+	}
+}
diff --git a/vendor/github.com/mitchellh/go-wordwrap/LICENSE.md b/vendor/github.com/mitchellh/go-wordwrap/LICENSE.md
new file mode 100644
index 00000000..22985159
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-wordwrap/LICENSE.md
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Mitchell Hashimoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/go-wordwrap/README.md b/vendor/github.com/mitchellh/go-wordwrap/README.md
new file mode 100644
index 00000000..60ae3117
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-wordwrap/README.md
@@ -0,0 +1,39 @@
+# go-wordwrap
+
+`go-wordwrap` (Golang package: `wordwrap`) is a package for Go that
+automatically wraps words into multiple lines. The primary use case for this
+is in formatting CLI output, but of course word wrapping is a generally useful
+thing to do.
+
+## Installation and Usage
+
+Install using `go get github.com/mitchellh/go-wordwrap`.
+
+Full documentation is available at
+http://godoc.org/github.com/mitchellh/go-wordwrap
+
+Below is an example of its usage ignoring errors:
+
+```go
+wrapped := wordwrap.WrapString("foo bar baz", 3)
+fmt.Println(wrapped)
+```
+
+Would output:
+
+```
+foo
+bar
+baz
+```
+
+## Word Wrap Algorithm
+
+This library doesn't use any clever algorithm for word wrapping. The wrapping
+is very naive: it simply breaks whenever there is whitespace or an explicit
+linebreak. The goal of this library is word wrapping for CLI output, so the
+input is typically fairly well-controlled human language. Because of this, the
+naive approach typically works just fine.
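+
+As a consequence, a word longer than the limit is kept intact on its own line
+rather than split. A small sketch of the expected behavior (the input string is
+made up for illustration):
+
+```go
+wrapped := wordwrap.WrapString("a verylongword b", 5)
+// expected: "a\nverylongword\nb" (the long word overflows the 5-character limit)
+fmt.Println(wrapped)
+```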
+
+In the future, we'd like to make the algorithm more advanced. We would do
+so without breaking the API.
diff --git a/vendor/github.com/mitchellh/go-wordwrap/go.mod b/vendor/github.com/mitchellh/go-wordwrap/go.mod
new file mode 100644
index 00000000..2ae411b2
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-wordwrap/go.mod
@@ -0,0 +1 @@
+module github.com/mitchellh/go-wordwrap
diff --git a/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go b/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go
new file mode 100644
index 00000000..ac67205b
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go
@@ -0,0 +1,73 @@
+package wordwrap
+
+import (
+	"bytes"
+	"unicode"
+)
+
+// WrapString wraps the given string within lim width in characters.
+//
+// Wrapping is currently naive and only happens at white-space, so pathological
+// cases, such as a very long word, can dramatically reach past the limit. A
+// future version of the library will implement smarter wrapping.
+func WrapString(s string, lim uint) string {
+	// Initialize a buffer with capacity for the input string.
+	init := make([]byte, 0, len(s))
+	buf := bytes.NewBuffer(init)
+
+	var current uint
+	var wordBuf, spaceBuf bytes.Buffer
+
+	for _, char := range s {
+		if char == '\n' {
+			if wordBuf.Len() == 0 {
+				if current+uint(spaceBuf.Len()) > lim {
+					current = 0
+				} else {
+					current += uint(spaceBuf.Len())
+					spaceBuf.WriteTo(buf)
+				}
+				spaceBuf.Reset()
+			} else {
+				current += uint(spaceBuf.Len() + wordBuf.Len())
+				spaceBuf.WriteTo(buf)
+				spaceBuf.Reset()
+				wordBuf.WriteTo(buf)
+				wordBuf.Reset()
+			}
+			buf.WriteRune(char)
+			current = 0
+		} else if unicode.IsSpace(char) {
+			if spaceBuf.Len() == 0 || wordBuf.Len() > 0 {
+				current += uint(spaceBuf.Len() + wordBuf.Len())
+				spaceBuf.WriteTo(buf)
+				spaceBuf.Reset()
+				wordBuf.WriteTo(buf)
+				wordBuf.Reset()
+			}
+
+			spaceBuf.WriteRune(char)
+		} else {
+
+			wordBuf.WriteRune(char)
+
+			if current+uint(spaceBuf.Len()+wordBuf.Len()) > lim && uint(wordBuf.Len()) < lim {
+				buf.WriteRune('\n')
+				current = 0
+				spaceBuf.Reset()
+			}
+		}
+	}
+
+	if wordBuf.Len() == 0 {
+		if current+uint(spaceBuf.Len()) <= lim {
+			spaceBuf.WriteTo(buf)
+		}
+	} else {
+		spaceBuf.WriteTo(buf)
+		wordBuf.WriteTo(buf)
+	}
+
+	return buf.String()
+}
diff --git a/vendor/github.com/peterbourgon/diskv/LICENSE b/vendor/github.com/peterbourgon/diskv/LICENSE
new file mode 100644
index 00000000..41ce7f16
--- /dev/null
+++ b/vendor/github.com/peterbourgon/diskv/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2011-2012 Peter Bourgon
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/peterbourgon/diskv/README.md b/vendor/github.com/peterbourgon/diskv/README.md
new file mode 100644
index 00000000..3474739e
--- /dev/null
+++ b/vendor/github.com/peterbourgon/diskv/README.md
@@ -0,0 +1,141 @@
+# What is diskv?
+
+Diskv (disk-vee) is a simple, persistent key-value store written in the Go
+language. It starts with an incredibly simple API for storing arbitrary data on
+a filesystem by key, and builds several layers of performance-enhancing
+abstraction on top.  The end result is a conceptually simple, but highly
+performant, disk-backed storage system.
+
+[![Build Status][1]][2]
+
+[1]: https://drone.io/github.com/peterbourgon/diskv/status.png
+[2]: https://drone.io/github.com/peterbourgon/diskv/latest
+
+
+# Installing
+
+Install [Go 1][3], either [from source][4] or [with a prepackaged binary][5].
+Then,
+
+```bash
+$ go get github.com/peterbourgon/diskv
+```
+
+[3]: http://golang.org
+[4]: http://golang.org/doc/install/source
+[5]: http://golang.org/doc/install
+
+
+# Usage
+
+```go
+package main
+
+import (
+	"fmt"
+	"github.com/peterbourgon/diskv"
+)
+
+func main() {
+	// Simplest transform function: put all the data files into the base dir.
+	flatTransform := func(s string) []string { return []string{} }
+
+	// Initialize a new diskv store, rooted at "my-data-dir", with a 1MB cache.
+	d := diskv.New(diskv.Options{
+		BasePath:     "my-data-dir",
+		Transform:    flatTransform,
+		CacheSizeMax: 1024 * 1024,
+	})
+
+	// Write three bytes to the key "alpha".
+	key := "alpha"
+	d.Write(key, []byte{'1', '2', '3'})
+
+	// Read the value back out of the store.
+	value, _ := d.Read(key)
+	fmt.Printf("%v\n", value)
+
+	// Erase the key+value from the store (and the disk).
+	d.Erase(key)
+}
+```
+
+More complex examples can be found in the "examples" subdirectory.
+
+
+# Theory
+
+## Basic idea
+
+At its core, diskv is a map of a key (`string`) to arbitrary data (`[]byte`).
+The data is written to a single file on disk, with the same name as the key.
+The key determines where that file will be stored, via a user-provided
+`TransformFunc`, which takes a key and returns a slice (`[]string`)
+corresponding to a path list where the key file will be stored. The simplest
+TransformFunc,
+
+```go
+func SimpleTransform(key string) []string {
+    return []string{}
+}
+```
+
+will place all keys in the same base directory. The design is inspired by
+[Redis diskstore][6]; a TransformFunc which emulates the default diskstore
+behavior is available in the content-addressable-storage example.
+
+[6]: http://groups.google.com/group/redis-db/browse_thread/thread/d444bc786689bde9?pli=1
+
+**Note** that your TransformFunc should ensure that one valid key doesn't
+transform to a subset of another valid key. That is, it shouldn't be possible
+to construct valid keys that resolve to directory names. As a concrete example,
+if your TransformFunc splits on every 3 characters, then
+
+```go
+d.Write("abcabc", val) // OK: written to <base>/abc/abc/abcabc
+d.Write("abc", val)    // Error: attempted write to <base>/abc/abc, but it's a directory
+```
+
+This will be addressed in an upcoming version of diskv.
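+
+One way to sidestep this today (an illustrative sketch, not part of diskv) is a
+fixed-depth transform: every key file then lives at the same depth, so a key
+file can never collide with a directory created for another key.
+
+```go
+// Illustrative only; assumes "crypto/md5" and "fmt" are imported.
+func hashedTransform(key string) []string {
+	sum := fmt.Sprintf("%x", md5.Sum([]byte(key)))
+	return []string{sum[0:2], sum[2:4]}
+}
+```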
+
+Probably the most important design principle behind diskv is that your data is
+always flatly available on the disk. diskv will never do anything that would
+prevent you from accessing, copying, backing up, or otherwise interacting with
+your data via common UNIX commandline tools.
+
+## Adding a cache
+
+An in-memory caching layer is provided by combining the basic store
+functionality with a simple map structure, and keeping it up-to-date as
+appropriate. Since Go's map type is not threadsafe, it's combined
+with a sync.RWMutex to provide safe concurrent access.
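+
+Conceptually, the guard looks something like this (a simplified sketch, not the
+actual diskv source; assumes "sync" is imported):
+
+```go
+type cacheSketch struct {
+	mu sync.RWMutex
+	m  map[string][]byte
+}
+
+func (c *cacheSketch) get(key string) ([]byte, bool) {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	val, ok := c.m[key]
+	return val, ok
+}
+
+func (c *cacheSketch) put(key string, val []byte) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	c.m[key] = val
+}
+```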
+
+## Adding order
+
+diskv is a key-value store and therefore inherently unordered. An ordering
+system can be injected into the store by passing something which satisfies the
+diskv.Index interface. (A default implementation, using Google's
+[btree][7] package, is provided.) Basically, diskv keeps an ordered (by a
+user-provided Less function) index of the keys, which can be queried.
+
+[7]: https://github.com/google/btree
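+
+A minimal sketch of an ordered store, using the provided `BTreeIndex` and the
+`Index`/`IndexLess` fields of `diskv.Options`:
+
+```go
+d := diskv.New(diskv.Options{
+	BasePath:  "my-data-dir",
+	Index:     &diskv.BTreeIndex{},
+	IndexLess: func(a, b string) bool { return a < b },
+})
+
+// Fetch up to 10 keys in order, starting immediately after "alpha"
+// (or from the beginning, if "alpha" is not a known key).
+keys := d.Index.Keys("alpha", 10)
+```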
+
+## Adding compression
+
+Something which implements the diskv.Compression interface may be passed
+during store creation, so that all Writes and Reads are filtered through
+a compression/decompression pipeline. Several default implementations,
+using stdlib compression algorithms, are provided. Note that data is cached
+compressed; the cost of decompression is borne with each Read.
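+
+For example (a short sketch using one of the provided helpers):
+
+```go
+d := diskv.New(diskv.Options{
+	BasePath:    "my-data-dir",
+	Compression: diskv.NewGzipCompression(),
+})
+```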
+
+## Streaming
+
+diskv also now provides ReadStream and WriteStream methods, to allow very large
+data to be handled efficiently.
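+
+A sketch of the streaming calls (error handling and imports omitted; the file
+name is just an example):
+
+```go
+// Write a large value without holding it all in memory.
+f, _ := os.Open("big-file.bin")
+defer f.Close()
+d.WriteStream("big", f, false) // sync=false: rely on the filesystem to flush
+
+// Stream it back out; direct=true bypasses (and lazily clears) the cache.
+rc, _ := d.ReadStream("big", true)
+defer rc.Close()
+io.Copy(os.Stdout, rc)
+```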
+
+
+# Future plans
+
+ * Needs plenty of robust testing: huge datasets, etc...
+ * More thorough benchmarking
+ * Your suggestions for use-cases I haven't thought of
diff --git a/vendor/github.com/peterbourgon/diskv/compression.go b/vendor/github.com/peterbourgon/diskv/compression.go
new file mode 100644
index 00000000..5192b027
--- /dev/null
+++ b/vendor/github.com/peterbourgon/diskv/compression.go
@@ -0,0 +1,64 @@
+package diskv
+
+import (
+	"compress/flate"
+	"compress/gzip"
+	"compress/zlib"
+	"io"
+)
+
+// Compression is an interface that Diskv uses to implement compression of
+// data. Writer takes a destination io.Writer and returns a WriteCloser that
+// compresses all data written through it. Reader takes a source io.Reader and
+// returns a ReadCloser that decompresses all data read through it. You may
+// define these methods on your own type, or use one of the NewCompression
+// helpers.
+type Compression interface {
+	Writer(dst io.Writer) (io.WriteCloser, error)
+	Reader(src io.Reader) (io.ReadCloser, error)
+}
+
+// NewGzipCompression returns a Gzip-based Compression.
+func NewGzipCompression() Compression {
+	return NewGzipCompressionLevel(flate.DefaultCompression)
+}
+
+// NewGzipCompressionLevel returns a Gzip-based Compression with the given level.
+func NewGzipCompressionLevel(level int) Compression {
+	return &genericCompression{
+		wf: func(w io.Writer) (io.WriteCloser, error) { return gzip.NewWriterLevel(w, level) },
+		rf: func(r io.Reader) (io.ReadCloser, error) { return gzip.NewReader(r) },
+	}
+}
+
+// NewZlibCompression returns a Zlib-based Compression.
+func NewZlibCompression() Compression {
+	return NewZlibCompressionLevel(flate.DefaultCompression)
+}
+
+// NewZlibCompressionLevel returns a Zlib-based Compression with the given level.
+func NewZlibCompressionLevel(level int) Compression {
+	return NewZlibCompressionLevelDict(level, nil)
+}
+
+// NewZlibCompressionLevelDict returns a Zlib-based Compression with the given
+// level, based on the given dictionary.
+func NewZlibCompressionLevelDict(level int, dict []byte) Compression {
+	return &genericCompression{
+		func(w io.Writer) (io.WriteCloser, error) { return zlib.NewWriterLevelDict(w, level, dict) },
+		func(r io.Reader) (io.ReadCloser, error) { return zlib.NewReaderDict(r, dict) },
+	}
+}
+
+type genericCompression struct {
+	wf func(w io.Writer) (io.WriteCloser, error)
+	rf func(r io.Reader) (io.ReadCloser, error)
+}
+
+func (g *genericCompression) Writer(dst io.Writer) (io.WriteCloser, error) {
+	return g.wf(dst)
+}
+
+func (g *genericCompression) Reader(src io.Reader) (io.ReadCloser, error) {
+	return g.rf(src)
+}
diff --git a/vendor/github.com/peterbourgon/diskv/diskv.go b/vendor/github.com/peterbourgon/diskv/diskv.go
new file mode 100644
index 00000000..524dc0a6
--- /dev/null
+++ b/vendor/github.com/peterbourgon/diskv/diskv.go
@@ -0,0 +1,624 @@
+// Diskv (disk-vee) is a simple, persistent, key-value store.
+// It stores all data flatly on the filesystem.
+
+package diskv
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+	"syscall"
+)
+
+const (
+	defaultBasePath             = "diskv"
+	defaultFilePerm os.FileMode = 0666
+	defaultPathPerm os.FileMode = 0777
+)
+
+var (
+	defaultTransform   = func(s string) []string { return []string{} }
+	errCanceled        = errors.New("canceled")
+	errEmptyKey        = errors.New("empty key")
+	errBadKey          = errors.New("bad key")
+	errImportDirectory = errors.New("can't import a directory")
+)
+
+// TransformFunction transforms a key into a slice of strings, with each
+// element in the slice representing a directory in the file path where the
+// key's entry will eventually be stored.
+//
+// For example, if TransformFunc transforms "abcdef" to ["ab", "cde", "f"],
+// the final location of the data file will be <basedir>/ab/cde/f/abcdef
+type TransformFunction func(s string) []string
+
+// Options define a set of properties that dictate Diskv behavior.
+// All values are optional.
+type Options struct {
+	BasePath     string
+	Transform    TransformFunction
+	CacheSizeMax uint64 // bytes
+	PathPerm     os.FileMode
+	FilePerm     os.FileMode
+	// If TempDir is set, it will enable filesystem atomic writes by
+	// writing temporary files to that location before being moved
+	// to BasePath.
+	// Note that TempDir MUST be on the same device/partition as
+	// BasePath.
+	TempDir string
+
+	Index     Index
+	IndexLess LessFunction
+
+	Compression Compression
+}
+
+// Diskv is the root store type. You shouldn't construct Diskv
+// structures directly; instead, use the New constructor.
+type Diskv struct {
+	Options
+	mu        sync.RWMutex
+	cache     map[string][]byte
+	cacheSize uint64
+}
+
+// New returns an initialized Diskv structure, ready to use.
+// If the path identified by baseDir already contains data,
+// it will be accessible, but not yet cached.
+func New(o Options) *Diskv {
+	if o.BasePath == "" {
+		o.BasePath = defaultBasePath
+	}
+	if o.Transform == nil {
+		o.Transform = defaultTransform
+	}
+	if o.PathPerm == 0 {
+		o.PathPerm = defaultPathPerm
+	}
+	if o.FilePerm == 0 {
+		o.FilePerm = defaultFilePerm
+	}
+
+	d := &Diskv{
+		Options:   o,
+		cache:     map[string][]byte{},
+		cacheSize: 0,
+	}
+
+	if d.Index != nil && d.IndexLess != nil {
+		d.Index.Initialize(d.IndexLess, d.Keys(nil))
+	}
+
+	return d
+}
+
+// Write synchronously writes the key-value pair to disk, making it immediately
+// available for reads. Write relies on the filesystem to perform an eventual
+// sync to physical media. If you need stronger guarantees, see WriteStream.
+func (d *Diskv) Write(key string, val []byte) error {
+	return d.WriteStream(key, bytes.NewBuffer(val), false)
+}
+
+// WriteStream writes the data represented by the io.Reader to the disk, under
+// the provided key. If sync is true, WriteStream performs an explicit sync on
+// the file as soon as it's written.
+//
+// bytes.Buffer provides io.Reader semantics for basic data types.
+func (d *Diskv) WriteStream(key string, r io.Reader, sync bool) error {
+	if len(key) <= 0 {
+		return errEmptyKey
+	}
+
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	return d.writeStreamWithLock(key, r, sync)
+}
+
+// createKeyFileWithLock either creates the key file directly, or
+// creates a temporary file in TempDir if it is set.
+func (d *Diskv) createKeyFileWithLock(key string) (*os.File, error) {
+	if d.TempDir != "" {
+		if err := os.MkdirAll(d.TempDir, d.PathPerm); err != nil {
+			return nil, fmt.Errorf("temp mkdir: %s", err)
+		}
+		f, err := ioutil.TempFile(d.TempDir, "")
+		if err != nil {
+			return nil, fmt.Errorf("temp file: %s", err)
+		}
+
+		if err := f.Chmod(d.FilePerm); err != nil {
+			f.Close()           // error deliberately ignored
+			os.Remove(f.Name()) // error deliberately ignored
+			return nil, fmt.Errorf("chmod: %s", err)
+		}
+		return f, nil
+	}
+
+	mode := os.O_WRONLY | os.O_CREATE | os.O_TRUNC // overwrite if exists
+	f, err := os.OpenFile(d.completeFilename(key), mode, d.FilePerm)
+	if err != nil {
+		return nil, fmt.Errorf("open file: %s", err)
+	}
+	return f, nil
+}
+
+// writeStreamWithLock does no input validation checking.
+func (d *Diskv) writeStreamWithLock(key string, r io.Reader, sync bool) error {
+	if err := d.ensurePathWithLock(key); err != nil {
+		return fmt.Errorf("ensure path: %s", err)
+	}
+
+	f, err := d.createKeyFileWithLock(key)
+	if err != nil {
+		return fmt.Errorf("create key file: %s", err)
+	}
+
+	wc := io.WriteCloser(&nopWriteCloser{f})
+	if d.Compression != nil {
+		wc, err = d.Compression.Writer(f)
+		if err != nil {
+			f.Close()           // error deliberately ignored
+			os.Remove(f.Name()) // error deliberately ignored
+			return fmt.Errorf("compression writer: %s", err)
+		}
+	}
+
+	if _, err := io.Copy(wc, r); err != nil {
+		f.Close()           // error deliberately ignored
+		os.Remove(f.Name()) // error deliberately ignored
+		return fmt.Errorf("i/o copy: %s", err)
+	}
+
+	if err := wc.Close(); err != nil {
+		f.Close()           // error deliberately ignored
+		os.Remove(f.Name()) // error deliberately ignored
+		return fmt.Errorf("compression close: %s", err)
+	}
+
+	if sync {
+		if err := f.Sync(); err != nil {
+			f.Close()           // error deliberately ignored
+			os.Remove(f.Name()) // error deliberately ignored
+			return fmt.Errorf("file sync: %s", err)
+		}
+	}
+
+	if err := f.Close(); err != nil {
+		return fmt.Errorf("file close: %s", err)
+	}
+
+	if f.Name() != d.completeFilename(key) {
+		if err := os.Rename(f.Name(), d.completeFilename(key)); err != nil {
+			os.Remove(f.Name()) // error deliberately ignored
+			return fmt.Errorf("rename: %s", err)
+		}
+	}
+
+	if d.Index != nil {
+		d.Index.Insert(key)
+	}
+
+	d.bustCacheWithLock(key) // cache only on read
+
+	return nil
+}
+
+// Import imports the source file into diskv under the destination key. If the
+// destination key already exists, it's overwritten. If move is true, the
+// source file is removed after a successful import.
+func (d *Diskv) Import(srcFilename, dstKey string, move bool) (err error) {
+	if dstKey == "" {
+		return errEmptyKey
+	}
+
+	if fi, err := os.Stat(srcFilename); err != nil {
+		return err
+	} else if fi.IsDir() {
+		return errImportDirectory
+	}
+
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	if err := d.ensurePathWithLock(dstKey); err != nil {
+		return fmt.Errorf("ensure path: %s", err)
+	}
+
+	if move {
+		if err := syscall.Rename(srcFilename, d.completeFilename(dstKey)); err == nil {
+			d.bustCacheWithLock(dstKey)
+			return nil
+		} else if err != syscall.EXDEV {
+			// The rename failed for a reason other than crossing devices
+			// (EXDEV), so give up; an EXDEV failure falls through to the copy below.
+			return err
+		}
+	}
+
+	f, err := os.Open(srcFilename)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	err = d.writeStreamWithLock(dstKey, f, false)
+	if err == nil && move {
+		err = os.Remove(srcFilename)
+	}
+	return err
+}
+
+// Read reads the key and returns the value.
+// If the key is available in the cache, Read won't touch the disk.
+// If the key is not in the cache, Read will have the side-effect of
+// lazily caching the value.
+func (d *Diskv) Read(key string) ([]byte, error) {
+	rc, err := d.ReadStream(key, false)
+	if err != nil {
+		return []byte{}, err
+	}
+	defer rc.Close()
+	return ioutil.ReadAll(rc)
+}
+
+// ReadStream reads the key and returns the value (data) as an io.ReadCloser.
+// If the value is cached from a previous read, and direct is false,
+// ReadStream will use the cached value. Otherwise, it will return a handle to
+// the file on disk, and cache the data on read.
+//
+// If direct is true, ReadStream will lazily delete any cached value for the
+// key, and return a direct handle to the file on disk.
+//
+// If compression is enabled, ReadStream taps into the io.Reader stream prior
+// to decompression, and caches the compressed data.
+func (d *Diskv) ReadStream(key string, direct bool) (io.ReadCloser, error) {
+	d.mu.RLock()
+	defer d.mu.RUnlock()
+
+	if val, ok := d.cache[key]; ok {
+		if !direct {
+			buf := bytes.NewBuffer(val)
+			if d.Compression != nil {
+				return d.Compression.Reader(buf)
+			}
+			return ioutil.NopCloser(buf), nil
+		}
+
+		go func() {
+			d.mu.Lock()
+			defer d.mu.Unlock()
+			d.uncacheWithLock(key, uint64(len(val)))
+		}()
+	}
+
+	return d.readWithRLock(key)
+}
+
+// readWithRLock ignores the cache, and returns an io.ReadCloser representing the
+// decompressed data for the given key, streamed from the disk. Clients should
+// acquire a read lock on the Diskv and check the cache themselves before
+// calling read.
+func (d *Diskv) readWithRLock(key string) (io.ReadCloser, error) {
+	filename := d.completeFilename(key)
+
+	fi, err := os.Stat(filename)
+	if err != nil {
+		return nil, err
+	}
+	if fi.IsDir() {
+		return nil, os.ErrNotExist
+	}
+
+	f, err := os.Open(filename)
+	if err != nil {
+		return nil, err
+	}
+
+	var r io.Reader
+	if d.CacheSizeMax > 0 {
+		r = newSiphon(f, d, key)
+	} else {
+		r = &closingReader{f}
+	}
+
+	var rc = io.ReadCloser(ioutil.NopCloser(r))
+	if d.Compression != nil {
+		rc, err = d.Compression.Reader(r)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return rc, nil
+}
+
+// closingReader provides a Reader that automatically closes the
+// embedded ReadCloser when it reaches EOF
+type closingReader struct {
+	rc io.ReadCloser
+}
+
+func (cr closingReader) Read(p []byte) (int, error) {
+	n, err := cr.rc.Read(p)
+	if err == io.EOF {
+		if closeErr := cr.rc.Close(); closeErr != nil {
+			return n, closeErr // close must succeed for Read to succeed
+		}
+	}
+	return n, err
+}
+
+// siphon is like a TeeReader: it copies all data read through it to an
+// internal buffer, and moves that buffer to the cache at EOF.
+type siphon struct {
+	f   *os.File
+	d   *Diskv
+	key string
+	buf *bytes.Buffer
+}
+
+// newSiphon constructs a siphoning reader that represents the passed file.
+// When a successful series of reads ends in an EOF, the siphon will write
+// the buffered data to Diskv's cache under the given key.
+func newSiphon(f *os.File, d *Diskv, key string) io.Reader {
+	return &siphon{
+		f:   f,
+		d:   d,
+		key: key,
+		buf: &bytes.Buffer{},
+	}
+}
+
+// Read implements the io.Reader interface for siphon.
+func (s *siphon) Read(p []byte) (int, error) {
+	n, err := s.f.Read(p)
+
+	if err == nil {
+		return s.buf.Write(p[0:n]) // Write must succeed for Read to succeed
+	}
+
+	if err == io.EOF {
+		s.d.cacheWithoutLock(s.key, s.buf.Bytes()) // cache may fail
+		if closeErr := s.f.Close(); closeErr != nil {
+			return n, closeErr // close must succeed for Read to succeed
+		}
+		return n, err
+	}
+
+	return n, err
+}
+
+// Erase synchronously erases the given key from the disk and the cache.
+func (d *Diskv) Erase(key string) error {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	d.bustCacheWithLock(key)
+
+	// erase from index
+	if d.Index != nil {
+		d.Index.Delete(key)
+	}
+
+	// erase from disk
+	filename := d.completeFilename(key)
+	if s, err := os.Stat(filename); err == nil {
+		if s.IsDir() {
+			return errBadKey
+		}
+		if err = os.Remove(filename); err != nil {
+			return err
+		}
+	} else {
+		// Return err as-is so caller can do os.IsNotExist(err).
+		return err
+	}
+
+	// clean up and return
+	d.pruneDirsWithLock(key)
+	return nil
+}
+
+// EraseAll will delete all of the data from the store, both in the cache and on
+// the disk. Note that EraseAll doesn't distinguish diskv-related data from non-
+// diskv-related data. Care should be taken to always specify a diskv base
+// directory that is exclusively for diskv data.
+func (d *Diskv) EraseAll() error {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+	d.cache = make(map[string][]byte)
+	d.cacheSize = 0
+	if d.TempDir != "" {
+		os.RemoveAll(d.TempDir) // errors ignored
+	}
+	return os.RemoveAll(d.BasePath)
+}
+
+// Has returns true if the given key exists.
+func (d *Diskv) Has(key string) bool {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+
+	if _, ok := d.cache[key]; ok {
+		return true
+	}
+
+	filename := d.completeFilename(key)
+	s, err := os.Stat(filename)
+	if err != nil {
+		return false
+	}
+	if s.IsDir() {
+		return false
+	}
+
+	return true
+}
+
+// Keys returns a channel that will yield every key accessible by the store,
+// in undefined order. If a cancel channel is provided, closing it will
+// terminate and close the keys channel.
+func (d *Diskv) Keys(cancel <-chan struct{}) <-chan string {
+	return d.KeysPrefix("", cancel)
+}
+
+// KeysPrefix returns a channel that will yield every key accessible by the
+// store with the given prefix, in undefined order. If a cancel channel is
+// provided, closing it will terminate and close the keys channel. If the
+// provided prefix is the empty string, all keys will be yielded.
+func (d *Diskv) KeysPrefix(prefix string, cancel <-chan struct{}) <-chan string {
+	var prepath string
+	if prefix == "" {
+		prepath = d.BasePath
+	} else {
+		prepath = d.pathFor(prefix)
+	}
+	c := make(chan string)
+	go func() {
+		filepath.Walk(prepath, walker(c, prefix, cancel))
+		close(c)
+	}()
+	return c
+}
+
+// walker returns a function which satisfies the filepath.WalkFunc interface.
+// It sends every non-directory file entry down the channel c.
+func walker(c chan<- string, prefix string, cancel <-chan struct{}) filepath.WalkFunc {
+	return func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		if info.IsDir() || !strings.HasPrefix(info.Name(), prefix) {
+			return nil // "pass"
+		}
+
+		select {
+		case c <- info.Name():
+		case <-cancel:
+			return errCanceled
+		}
+
+		return nil
+	}
+}
+
+// pathFor returns the absolute path to the location on the filesystem where
+// the data for the given key will be stored.
+func (d *Diskv) pathFor(key string) string {
+	return filepath.Join(d.BasePath, filepath.Join(d.Transform(key)...))
+}
+
+// ensurePathWithLock is a helper function that generates all necessary
+// directories on the filesystem for the given key.
+func (d *Diskv) ensurePathWithLock(key string) error {
+	return os.MkdirAll(d.pathFor(key), d.PathPerm)
+}
+
+// completeFilename returns the absolute path to the file for the given key.
+func (d *Diskv) completeFilename(key string) string {
+	return filepath.Join(d.pathFor(key), key)
+}
+
+// cacheWithLock attempts to cache the given key-value pair in the store's
+// cache. It can fail if the value is larger than the cache's maximum size.
+func (d *Diskv) cacheWithLock(key string, val []byte) error {
+	valueSize := uint64(len(val))
+	if err := d.ensureCacheSpaceWithLock(valueSize); err != nil {
+		return fmt.Errorf("%s; not caching", err)
+	}
+
+	// be very strict about memory guarantees
+	if (d.cacheSize + valueSize) > d.CacheSizeMax {
+		panic(fmt.Sprintf("failed to make room for value (%d/%d)", valueSize, d.CacheSizeMax))
+	}
+
+	d.cache[key] = val
+	d.cacheSize += valueSize
+	return nil
+}
+
+// cacheWithoutLock acquires the store's (write) mutex and calls cacheWithLock.
+func (d *Diskv) cacheWithoutLock(key string, val []byte) error {
+	d.mu.Lock()
+	defer d.mu.Unlock()
+	return d.cacheWithLock(key, val)
+}
+
+func (d *Diskv) bustCacheWithLock(key string) {
+	if val, ok := d.cache[key]; ok {
+		d.uncacheWithLock(key, uint64(len(val)))
+	}
+}
+
+func (d *Diskv) uncacheWithLock(key string, sz uint64) {
+	d.cacheSize -= sz
+	delete(d.cache, key)
+}
+
+// pruneDirsWithLock deletes empty directories in the path walk leading to the
+// key k. Typically this function is called after an Erase is made.
+func (d *Diskv) pruneDirsWithLock(key string) error {
+	pathlist := d.Transform(key)
+	for i := range pathlist {
+		dir := filepath.Join(d.BasePath, filepath.Join(pathlist[:len(pathlist)-i]...))
+
+		// thanks to Steven Blenkinsop for this snippet
+		switch fi, err := os.Stat(dir); true {
+		case err != nil:
+			return err
+		case !fi.IsDir():
+			panic(fmt.Sprintf("corrupt dirstate at %s", dir))
+		}
+
+		nlinks, err := filepath.Glob(filepath.Join(dir, "*"))
+		if err != nil {
+			return err
+		} else if len(nlinks) > 0 {
+			return nil // directory is not empty -- do not prune
+		}
+		if err = os.Remove(dir); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// ensureCacheSpaceWithLock deletes entries from the cache in arbitrary order
+// until the cache has at least valueSize bytes available.
+func (d *Diskv) ensureCacheSpaceWithLock(valueSize uint64) error {
+	if valueSize > d.CacheSizeMax {
+		return fmt.Errorf("value size (%d bytes) too large for cache (%d bytes)", valueSize, d.CacheSizeMax)
+	}
+
+	safe := func() bool { return (d.cacheSize + valueSize) <= d.CacheSizeMax }
+
+	for key, val := range d.cache {
+		if safe() {
+			break
+		}
+
+		d.uncacheWithLock(key, uint64(len(val)))
+	}
+
+	if !safe() {
+		panic(fmt.Sprintf("%d bytes still won't fit in the cache! (max %d bytes)", valueSize, d.CacheSizeMax))
+	}
+
+	return nil
+}
+
+// nopWriteCloser wraps an io.Writer and provides a no-op Close method to
+// satisfy the io.WriteCloser interface.
+type nopWriteCloser struct {
+	io.Writer
+}
+
+func (wc *nopWriteCloser) Write(p []byte) (int, error) { return wc.Writer.Write(p) }
+func (wc *nopWriteCloser) Close() error                { return nil }
diff --git a/vendor/github.com/peterbourgon/diskv/index.go b/vendor/github.com/peterbourgon/diskv/index.go
new file mode 100644
index 00000000..96fee515
--- /dev/null
+++ b/vendor/github.com/peterbourgon/diskv/index.go
@@ -0,0 +1,115 @@
+package diskv
+
+import (
+	"sync"
+
+	"github.com/google/btree"
+)
+
+// Index is a generic interface for things that can
+// provide an ordered list of keys.
+type Index interface {
+	Initialize(less LessFunction, keys <-chan string)
+	Insert(key string)
+	Delete(key string)
+	Keys(from string, n int) []string
+}
+
+// LessFunction is used to initialize an Index of keys in a specific order.
+type LessFunction func(string, string) bool
+
+// btreeString is a custom data type that satisfies the BTree Less interface,
+// making the strings it wraps sortable by the BTree package.
+type btreeString struct {
+	s string
+	l LessFunction
+}
+
+// Less satisfies the BTree.Less interface using the btreeString's LessFunction.
+func (s btreeString) Less(i btree.Item) bool {
+	return s.l(s.s, i.(btreeString).s)
+}
+
+// BTreeIndex is an implementation of the Index interface using google/btree.
+type BTreeIndex struct {
+	sync.RWMutex
+	LessFunction
+	*btree.BTree
+}
+
+// Initialize populates the BTree tree with data from the keys channel,
+// according to the passed less function. It's destructive to the BTreeIndex.
+func (i *BTreeIndex) Initialize(less LessFunction, keys <-chan string) {
+	i.Lock()
+	defer i.Unlock()
+	i.LessFunction = less
+	i.BTree = rebuild(less, keys)
+}
+
+// Insert inserts the given key (only) into the BTree tree.
+func (i *BTreeIndex) Insert(key string) {
+	i.Lock()
+	defer i.Unlock()
+	if i.BTree == nil || i.LessFunction == nil {
+		panic("uninitialized index")
+	}
+	i.BTree.ReplaceOrInsert(btreeString{s: key, l: i.LessFunction})
+}
+
+// Delete removes the given key (only) from the BTree tree.
+func (i *BTreeIndex) Delete(key string) {
+	i.Lock()
+	defer i.Unlock()
+	if i.BTree == nil || i.LessFunction == nil {
+		panic("uninitialized index")
+	}
+	i.BTree.Delete(btreeString{s: key, l: i.LessFunction})
+}
+
+// Keys yields a maximum of n keys in order. If the passed 'from' key is empty,
+// Keys will return the first n keys. If the passed 'from' key is non-empty, the
+// first key in the returned slice will be the key that immediately follows the
+// passed key, in key order.
+func (i *BTreeIndex) Keys(from string, n int) []string {
+	i.RLock()
+	defer i.RUnlock()
+
+	if i.BTree == nil || i.LessFunction == nil {
+		panic("uninitialized index")
+	}
+
+	if i.BTree.Len() <= 0 {
+		return []string{}
+	}
+
+	btreeFrom := btreeString{s: from, l: i.LessFunction}
+	skipFirst := true
+	if len(from) <= 0 || !i.BTree.Has(btreeFrom) {
+		// no such key, so fabricate an always-smallest item
+		btreeFrom = btreeString{s: "", l: func(string, string) bool { return true }}
+		skipFirst = false
+	}
+
+	keys := []string{}
+	iterator := func(i btree.Item) bool {
+		keys = append(keys, i.(btreeString).s)
+		return len(keys) < n
+	}
+	i.BTree.AscendGreaterOrEqual(btreeFrom, iterator)
+
+	if skipFirst && len(keys) > 0 {
+		keys = keys[1:]
+	}
+
+	return keys
+}
+
+// rebuild does the work of regenerating the index
+// with the given keys.
+func rebuild(less LessFunction, keys <-chan string) *btree.BTree {
+	tree := btree.New(2)
+	for key := range keys {
+		tree.ReplaceOrInsert(btreeString{s: key, l: less})
+	}
+	return tree
+}
diff --git a/vendor/github.com/rancher/norman/objectclient/object_client.go b/vendor/github.com/rancher/norman/objectclient/object_client.go
index 70ca0803..fc251b1b 100644
--- a/vendor/github.com/rancher/norman/objectclient/object_client.go
+++ b/vendor/github.com/rancher/norman/objectclient/object_client.go
@@ -1,6 +1,7 @@
 package objectclient
 
 import (
+	"context"
 	"encoding/json"
 	"net/http"
 	"strings"
@@ -133,7 +134,7 @@ func (p *ObjectClient) Create(o runtime.Object) (runtime.Object, error) {
 		NamespaceIfScoped(ns, p.resource.Namespaced).
 		Resource(p.resource.Name).
 		Body(o).
-		Do().
+		Do(context.TODO()).
 		Into(result)
 	return result, err
 }
@@ -149,7 +150,7 @@ func (p *ObjectClient) GetNamespaced(namespace, name string, opts metav1.GetOpti
 		Resource(p.resource.Name).
 		VersionedParams(&opts, metav1.ParameterCodec).
 		Name(name).
-		Do().
+		Do(context.TODO()).
 		Into(result)
 	logrus.Tracef("REST GET %s/%s/%s/%s/%s/%s", p.getAPIPrefix(), p.gvk.Group, p.gvk.Version, namespace, p.resource.Name, name)
 	return result, err
@@ -164,7 +165,7 @@ func (p *ObjectClient) Get(name string, opts metav1.GetOptions) (runtime.Object,
 		Resource(p.resource.Name).
 		VersionedParams(&opts, metav1.ParameterCodec).
 		Name(name).
-		Do().
+		Do(context.TODO()).
 		Into(result)
 	logrus.Tracef("REST GET %s/%s/%s/%s/%s/%s", p.getAPIPrefix(), p.gvk.Group, p.gvk.Version, p.ns, p.resource.Name, name)
 	return result, err
@@ -186,7 +187,7 @@ func (p *ObjectClient) Update(name string, o runtime.Object) (runtime.Object, er
 		Resource(p.resource.Name).
 		Name(name).
 		Body(o).
-		Do().
+		Do(context.TODO()).
 		Into(result)
 	return result, err
 }
@@ -201,7 +202,7 @@ func (p *ObjectClient) DeleteNamespaced(namespace, name string, opts *metav1.Del
 	return req.Resource(p.resource.Name).
 		Name(name).
 		Body(opts).
-		Do().
+		Do(context.TODO()).
 		Error()
 }
 
@@ -213,7 +214,7 @@ func (p *ObjectClient) Delete(name string, opts *metav1.DeleteOptions) error {
 		Resource(p.resource.Name).
 		Name(name).
 		Body(opts).
-		Do().
+		Do(context.TODO()).
 		Error()
 }
 
@@ -225,7 +226,7 @@ func (p *ObjectClient) List(opts metav1.ListOptions) (runtime.Object, error) {
 		NamespaceIfScoped(p.ns, p.resource.Namespaced).
 		Resource(p.resource.Name).
 		VersionedParams(&opts, metav1.ParameterCodec).
-		Do().
+		Do(context.TODO()).
 		Into(result)
 }
 
@@ -237,7 +238,7 @@ func (p *ObjectClient) ListNamespaced(namespace string, opts metav1.ListOptions)
 		NamespaceIfScoped(namespace, p.resource.Namespaced).
 		Resource(p.resource.Name).
 		VersionedParams(&opts, metav1.ParameterCodec).
-		Do().
+		Do(context.TODO()).
 		Into(result)
 }
 
@@ -253,7 +254,7 @@ func (p *ObjectClient) Watch(opts metav1.ListOptions) (watch.Interface, error) {
 		NamespaceIfScoped(p.ns, p.resource.Namespaced).
 		Resource(p.resource.Name).
 		VersionedParams(&opts, metav1.ParameterCodec).
-		Stream()
+		Stream(context.TODO())
 	if err != nil {
 		return nil, err
 	}
@@ -302,7 +303,7 @@ func (p *ObjectClient) DeleteCollection(deleteOptions *metav1.DeleteOptions, lis
 		Resource(p.resource.Name).
 		VersionedParams(&listOptions, metav1.ParameterCodec).
 		Body(deleteOptions).
-		Do().
+		Do(context.TODO()).
 		Error()
 }
 
@@ -322,7 +323,7 @@ func (p *ObjectClient) Patch(name string, o runtime.Object, patchType types.Patc
 		SubResource(subresources...).
 		Name(name).
 		Body(data).
-		Do().
+		Do(context.TODO()).
 		Into(result)
 	return result, err
 }
diff --git a/vendor/github.com/rancher/norman/types/server_types.go b/vendor/github.com/rancher/norman/types/server_types.go
index 7e36aa22..70d6b690 100644
--- a/vendor/github.com/rancher/norman/types/server_types.go
+++ b/vendor/github.com/rancher/norman/types/server_types.go
@@ -174,6 +174,16 @@ func (r *APIContext) Filter(opts *QueryOptions, schema *Schema, obj interface{})
 	return nil
 }
 
+type Expire interface {
+	Expire(apiContext *APIContext, schema *Schema)
+}
+
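+// ExpireAccessControl calls Expire on the APIContext's AccessControl, if it
+// implements the Expire interface.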
+func (r *APIContext) ExpireAccessControl(schema *Schema) {
+	if e, ok := r.AccessControl.(Expire); ok {
+		e.Expire(r, schema)
+	}
+}
+
 var (
 	ASC  = SortOrder("asc")
 	DESC = SortOrder("desc")
diff --git a/vendor/github.com/russross/blackfriday/.gitignore b/vendor/github.com/russross/blackfriday/.gitignore
new file mode 100644
index 00000000..75623dcc
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/.gitignore
@@ -0,0 +1,8 @@
+*.out
+*.swp
+*.8
+*.6
+_obj
+_test*
+markdown
+tags
diff --git a/vendor/github.com/russross/blackfriday/.travis.yml b/vendor/github.com/russross/blackfriday/.travis.yml
new file mode 100644
index 00000000..2f3351d7
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/.travis.yml
@@ -0,0 +1,17 @@
+sudo: false
+language: go
+go:
+  - "1.9.x"
+  - "1.10.x"
+  - tip
+matrix:
+  fast_finish: true
+  allow_failures:
+    - go: tip
+install:
+  - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
+script:
+  - go get -t -v ./...
+  - diff -u <(echo -n) <(gofmt -d -s .)
+  - go tool vet .
+  - go test -v -race ./...
diff --git a/vendor/github.com/russross/blackfriday/LICENSE.txt b/vendor/github.com/russross/blackfriday/LICENSE.txt
new file mode 100644
index 00000000..2885af36
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/LICENSE.txt
@@ -0,0 +1,29 @@
+Blackfriday is distributed under the Simplified BSD License:
+
+> Copyright © 2011 Russ Ross
+> All rights reserved.
+>
+> Redistribution and use in source and binary forms, with or without
+> modification, are permitted provided that the following conditions
+> are met:
+>
+> 1.  Redistributions of source code must retain the above copyright
+>     notice, this list of conditions and the following disclaimer.
+>
+> 2.  Redistributions in binary form must reproduce the above
+>     copyright notice, this list of conditions and the following
+>     disclaimer in the documentation and/or other materials provided with
+>     the distribution.
+>
+> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+> FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+> COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+> INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+> BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+> LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+> CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+> LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+> ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+> POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/russross/blackfriday/README.md b/vendor/github.com/russross/blackfriday/README.md
new file mode 100644
index 00000000..3c62e137
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/README.md
@@ -0,0 +1,369 @@
+Blackfriday
+[![Build Status][BuildSVG]][BuildURL]
+[![Godoc][GodocV2SVG]][GodocV2URL]
+===========
+
+Blackfriday is a [Markdown][1] processor implemented in [Go][2]. It
+is paranoid about its input (so you can safely feed it user-supplied
+data), it is fast, it supports common extensions (tables, smart
+punctuation substitutions, etc.), and it is safe for all utf-8
+(unicode) input.
+
+HTML output is currently supported, along with Smartypants
+extensions.
+
+It started as a translation from C of [Sundown][3].
+
+
+Installation
+------------
+
+Blackfriday is compatible with any modern Go release. With Go and git installed:
+
+    go get -u gopkg.in/russross/blackfriday.v2
+
+will download, compile, and install the package into your `$GOPATH` directory
+hierarchy.
+
+
+Versions
+--------
+
+The currently maintained and recommended version of Blackfriday is `v2`. It's being
+developed on its own branch: https://github.com/russross/blackfriday/tree/v2 and the
+documentation is available at
+https://godoc.org/gopkg.in/russross/blackfriday.v2.
+
+It is `go get`-able via [gopkg.in][6] at `gopkg.in/russross/blackfriday.v2`,
+but we highly recommend using a package management tool like [dep][7] or
+[Glide][8] and making use of semantic versioning. With package management you
+should import `github.com/russross/blackfriday` and specify that you're using
+version 2.0.0.
+
+Version 2 offers a number of improvements over v1:
+
+* Cleaned up API
+* A separate call to [`Parse`][4], which produces an abstract syntax tree for
+  the document
+* Latest bug fixes
+* Flexibility to easily add your own rendering extensions
+
+Potential drawbacks:
+
+* Our benchmarks show v2 to be slightly slower than v1, currently in the
+  ballpark of 15%.
+* API breakage. If you can't afford to modify your code to adhere to the new API
+  and don't care too much about the new features, v2 is probably not for you.
+* Several bug fixes are trailing behind and still need to be forward-ported to
+  v2. See issue [#348](https://github.com/russross/blackfriday/issues/348) for
+  tracking.
+
+If you are still interested in the legacy `v1`, you can import it from
+`github.com/russross/blackfriday`. Documentation for the legacy v1 can be found
+here: https://godoc.org/github.com/russross/blackfriday
+
+### Known issue with `dep`
+
+There is a known problem with using Blackfriday v1 _transitively_ and `dep`.
+Currently `dep` prioritizes semver versions over anything else, and picks the
+latest one, plus it does not apply a `[[constraint]]` specifier to transitively
+pulled in packages. So if you're using something that uses Blackfriday v1, but
+that something does not use `dep` yet, you will get Blackfriday v2 pulled in and
+your first dependency will fail to build.
+
+There are a couple of fixes for it, documented here:
+https://github.com/golang/dep/blob/master/docs/FAQ.md#how-do-i-constrain-a-transitive-dependencys-version
+
+Meanwhile, the `dep` team is working on a more general solution to the problem
+of constraints on transitive dependencies: https://github.com/golang/dep/issues/1124.
+
+
+Usage
+-----
+
+### v1
+
+For basic usage, it is as simple as getting your input into a byte
+slice and calling:
+
+    output := blackfriday.MarkdownBasic(input)
+
+This renders it with no extensions enabled. To get a more useful
+feature set, use this instead:
+
+    output := blackfriday.MarkdownCommon(input)
+
+### v2
+
+For the most sensible markdown processing, it is as simple as getting your input
+into a byte slice and calling:
+
+```go
+output := blackfriday.Run(input)
+```
+
+Your input will be parsed and the output rendered with a set of the most popular
+extensions enabled. If you want the most basic feature set, corresponding with
+the bare Markdown specification, use:
+
+```go
+output := blackfriday.Run(input, blackfriday.WithNoExtensions())
+```
+
+### Sanitize untrusted content
+
+Blackfriday itself does nothing to protect against malicious content. If you are
+dealing with user-supplied markdown, we recommend running Blackfriday's output
+through an HTML sanitizer such as [Bluemonday][5].
+
+Here's an example of simple usage of Blackfriday together with Bluemonday:
+
+```go
+import (
+    "github.com/microcosm-cc/bluemonday"
+    "gopkg.in/russross/blackfriday.v2"
+)
+
+// ...
+unsafe := blackfriday.Run(input)
+html := bluemonday.UGCPolicy().SanitizeBytes(unsafe)
+```
+
+### Custom options, v1
+
+If you want to customize the set of options, first get a renderer
+(currently only the HTML output engine), then use it to
+call the more general `Markdown` function. For examples, see the
+implementations of `MarkdownBasic` and `MarkdownCommon` in
+`markdown.go`.
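+
+A sketch of that pattern for v1 (the flag and extension constants shown are
+examples; see `html.go` and `markdown.go` for the full list):
+
+```go
+renderer := blackfriday.HtmlRenderer(blackfriday.HTML_USE_XHTML, "", "")
+extensions := blackfriday.EXTENSION_TABLES | blackfriday.EXTENSION_FENCED_CODE
+output := blackfriday.Markdown(input, renderer, extensions)
+```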
+
+### Custom options, v2
+
+If you want to customize the set of options, use `blackfriday.WithExtensions`,
+`blackfriday.WithRenderer` and `blackfriday.WithRefOverride`.
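+
+For example (a sketch; `CommonExtensions` is one of the v2 extension bitmasks):
+
+```go
+output := blackfriday.Run(input, blackfriday.WithExtensions(blackfriday.CommonExtensions))
+```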
+
+### `blackfriday-tool`
+
+You can also check out `blackfriday-tool` for a more complete example
+of how to use it. Download and install it using:
+
+    go get github.com/russross/blackfriday-tool
+
+This is a simple command-line tool that allows you to process a
+markdown file using a standalone program.  You can also browse the
+source directly on github if you are just looking for some example
+code:
+
+* <http://github.com/russross/blackfriday-tool>
+
+Note that if you have not already done so, installing
+`blackfriday-tool` will be sufficient to download and install
+blackfriday in addition to the tool itself. The tool binary will be
+installed in `$GOPATH/bin`.  This is a statically-linked binary that
+can be copied to wherever you need it without worrying about
+dependencies and library versions.
+
+### Sanitized anchor names
+
+Blackfriday includes an algorithm for creating sanitized anchor names
+corresponding to a given input text. This algorithm is used to create
+anchors for headings when `EXTENSION_AUTO_HEADER_IDS` is enabled. The
+algorithm has a specification, so that other packages can create
+compatible anchor names and links to those anchors.
+
+The specification is located at https://godoc.org/github.com/russross/blackfriday#hdr-Sanitized_Anchor_Names.
+
+[`SanitizedAnchorName`](https://godoc.org/github.com/russross/blackfriday#SanitizedAnchorName) exposes this functionality, and can be used to
+create compatible links to the anchor names generated by blackfriday.
+This algorithm is also implemented in a small standalone package at
+[`github.com/shurcooL/sanitized_anchor_name`](https://godoc.org/github.com/shurcooL/sanitized_anchor_name). It can be useful for clients
+that want a small package and don't need full functionality of blackfriday.
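+
+For instance (illustrative; see the specification for the exact rules):
+
+```go
+anchor := blackfriday.SanitizedAnchorName("Hello, World!")
+// anchor == "hello-world"
+```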
+
+
+Features
+--------
+
+All features of Sundown are supported, including:
+
+*   **Compatibility**. The Markdown v1.0.3 test suite passes with
+    the `--tidy` option.  Without `--tidy`, the differences are
+    mostly in whitespace and entity escaping, where blackfriday is
+    more consistent and cleaner.
+
+*   **Common extensions**, including table support, fenced code
+    blocks, autolinks, strikethroughs, non-strict emphasis, etc.
+
+*   **Safety**. Blackfriday is paranoid when parsing, making it safe
+    to feed untrusted user input without fear of bad things
+    happening. The test suite stress tests this and there are no
+    known inputs that make it crash.  If you find one, please let me
+    know and send me the input that does it.
+
+    NOTE: "safety" in this context means *runtime safety only*. In order to
+    protect yourself against JavaScript injection in untrusted content, see
+    [this example](https://github.com/russross/blackfriday#sanitize-untrusted-content).
+
+*   **Fast processing**. It is fast enough to render on-demand in
+    most web applications without having to cache the output.
+
+*   **Thread safety**. You can run multiple parsers in different
+    goroutines without ill effect. There is no dependence on global
+    shared state.
+
+*   **Minimal dependencies**. Blackfriday only depends on standard
+    library packages in Go. The source code is pretty
+    self-contained, so it is easy to add to any project, including
+    Google App Engine projects.
+
+*   **Standards compliant**. Output successfully validates using the
+    W3C validation tool for HTML 4.01 and XHTML 1.0 Transitional.
+
+
+Extensions
+----------
+
+In addition to the standard markdown syntax, this package
+implements the following extensions:
+
+*   **Intra-word emphasis suppression**. The `_` character is
+    commonly used inside words when discussing code, so having
+    markdown interpret it as an emphasis command is usually the
+    wrong thing. Blackfriday lets you treat all emphasis markers as
+    normal characters when they occur inside a word.
+
+*   **Tables**. Tables can be created by drawing them in the input
+    using a simple syntax:
+
+    ```
+    Name    | Age
+    --------|------
+    Bob     | 27
+    Alice   | 23
+    ```
+
+*   **Fenced code blocks**. In addition to the normal 4-space
+    indentation to mark code blocks, you can explicitly mark them
+    and supply a language (to make syntax highlighting simple). Just
+    mark it like this:
+
+        ``` go
+        func getTrue() bool {
+            return true
+        }
+        ```
+
+    You can use 3 or more backticks to mark the beginning of the
+    block, and the same number to mark the end of the block.
+
+    To preserve classes of fenced code blocks while using the bluemonday
+    HTML sanitizer, use the following policy:
+
+    ``` go
+    p := bluemonday.UGCPolicy()
+    p.AllowAttrs("class").Matching(regexp.MustCompile("^language-[a-zA-Z0-9]+$")).OnElements("code")
+    html := p.SanitizeBytes(unsafe)
+    ```
+
+*   **Definition lists**. A simple definition list is made of a single-line
+    term followed by a colon and the definition for that term.
+
+        Cat
+        : Fluffy animal everyone likes
+        
+        Internet
+        : Vector of transmission for pictures of cats
+
+    Terms must be separated from the previous definition by a blank line.
+
+*   **Footnotes**. A marker in the text that will become a superscript number;
+    a footnote definition that will be placed in a list of footnotes at the
+    end of the document. A footnote looks like this:
+
+        This is a footnote.[^1]
+        
+        [^1]: the footnote text.
+
+*   **Autolinking**. Blackfriday can find URLs that have not been
+    explicitly marked as links and turn them into links.
+
+*   **Strikethrough**. Use two tildes (`~~`) to mark text that
+    should be crossed out.
+
+*   **Hard line breaks**. With this extension enabled (it is off by
+    default in the `MarkdownBasic` and `MarkdownCommon` convenience
+    functions), newlines in the input translate into line breaks in
+    the output.
+
+*   **Smart quotes**. Smartypants-style punctuation substitution is
+    supported, turning normal double- and single-quote marks into
+    curly quotes, etc.
+
+*   **LaTeX-style dash parsing** is an additional option, where `--`
+    is translated into `&ndash;`, and `---` is translated into
+    `&mdash;`. This differs from most smartypants processors, which
+    turn a single hyphen into an ndash and a double hyphen into an
+    mdash.
+
+*   **Smart fractions**, where anything that looks like a fraction
+    is translated into suitable HTML (instead of just a few special
+    cases like most smartypants processors). For example, `4/5`
+    becomes `<sup>4</sup>&frasl;<sub>5</sub>`, which renders as
+    <sup>4</sup>&frasl;<sub>5</sub>.
+
+
+Other renderers
+---------------
+
+Blackfriday is structured to allow alternative rendering engines. Here
+are a few of note:
+
+*   [github_flavored_markdown](https://godoc.org/github.com/shurcooL/github_flavored_markdown):
+    provides a GitHub Flavored Markdown renderer with fenced code block
+    highlighting, clickable heading anchor links.
+
+    It's not customizable, and its goal is to produce HTML output
+    equivalent to the [GitHub Markdown API endpoint](https://developer.github.com/v3/markdown/#render-a-markdown-document-in-raw-mode),
+    except the rendering is performed locally.
+
+*   [markdownfmt](https://github.com/shurcooL/markdownfmt): like gofmt,
+    but for markdown.
+
+*   [LaTeX output](https://bitbucket.org/ambrevar/blackfriday-latex):
+    renders output as LaTeX.
+
+*   [bfchroma](https://github.com/Depado/bfchroma/): provides convenience
+    integration with the [Chroma](https://github.com/alecthomas/chroma) code
+    highlighting library. bfchroma is only compatible with v2 of Blackfriday and
+    provides a drop-in renderer ready to use with Blackfriday, as well as
+    options and means for further customization.
+
+
+TODO
+----
+
+*   More unit testing
+*   Improve Unicode support. It does not understand all Unicode
+    rules (about what constitutes a letter, a punctuation symbol,
+    etc.), so it may fail to detect word boundaries correctly in
+    some instances. It is safe on all UTF-8 input.
+
+
+License
+-------
+
+[Blackfriday is distributed under the Simplified BSD License](LICENSE.txt)
+
+
+   [1]: https://daringfireball.net/projects/markdown/ "Markdown"
+   [2]: https://golang.org/ "Go Language"
+   [3]: https://github.com/vmg/sundown "Sundown"
+   [4]: https://godoc.org/gopkg.in/russross/blackfriday.v2#Parse "Parse func"
+   [5]: https://github.com/microcosm-cc/bluemonday "Bluemonday"
+   [6]: https://labix.org/gopkg.in "gopkg.in"
+   [7]: https://github.com/golang/dep/ "dep"
+   [8]: https://github.com/Masterminds/glide "Glide"
+
+   [BuildSVG]: https://travis-ci.org/russross/blackfriday.svg?branch=master
+   [BuildURL]: https://travis-ci.org/russross/blackfriday
+   [GodocV2SVG]: https://godoc.org/gopkg.in/russross/blackfriday.v2?status.svg
+   [GodocV2URL]: https://godoc.org/gopkg.in/russross/blackfriday.v2
diff --git a/vendor/github.com/russross/blackfriday/block.go b/vendor/github.com/russross/blackfriday/block.go
new file mode 100644
index 00000000..45c21a6c
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/block.go
@@ -0,0 +1,1474 @@
+//
+// Blackfriday Markdown Processor
+// Available at http://github.com/russross/blackfriday
+//
+// Copyright © 2011 Russ Ross <russ@russross.com>.
+// Distributed under the Simplified BSD License.
+// See README.md for details.
+//
+
+//
+// Functions to parse block-level elements.
+//
+
+package blackfriday
+
+import (
+	"bytes"
+	"strings"
+	"unicode"
+)
+
+// Parse block-level data.
+// Note: this function and many that it calls assume that
+// the input buffer ends with a newline.
+func (p *parser) block(out *bytes.Buffer, data []byte) {
+	if len(data) == 0 || data[len(data)-1] != '\n' {
+		panic("block input is missing terminating newline")
+	}
+
+	// this is called recursively: enforce a maximum depth
+	if p.nesting >= p.maxNesting {
+		return
+	}
+	p.nesting++
+
+	// parse out one block-level construct at a time
+	for len(data) > 0 {
+		// prefixed header:
+		//
+		// # Header 1
+		// ## Header 2
+		// ...
+		// ###### Header 6
+		if p.isPrefixHeader(data) {
+			data = data[p.prefixHeader(out, data):]
+			continue
+		}
+
+		// block of preformatted HTML:
+		//
+		// <div>
+		//     ...
+		// </div>
+		if data[0] == '<' {
+			if i := p.html(out, data, true); i > 0 {
+				data = data[i:]
+				continue
+			}
+		}
+
+		// title block
+		//
+		// % stuff
+		// % more stuff
+		// % even more stuff
+		if p.flags&EXTENSION_TITLEBLOCK != 0 {
+			if data[0] == '%' {
+				if i := p.titleBlock(out, data, true); i > 0 {
+					data = data[i:]
+					continue
+				}
+			}
+		}
+
+		// blank lines.  note: returns the # of bytes to skip
+		if i := p.isEmpty(data); i > 0 {
+			data = data[i:]
+			continue
+		}
+
+		// indented code block:
+		//
+		//     func max(a, b int) int {
+		//         if a > b {
+		//             return a
+		//         }
+		//         return b
+		//      }
+		if p.codePrefix(data) > 0 {
+			data = data[p.code(out, data):]
+			continue
+		}
+
+		// fenced code block:
+		//
+		// ``` go info string here
+		// func fact(n int) int {
+		//     if n <= 1 {
+		//         return n
+		//     }
+		//     return n * fact(n-1)
+		// }
+		// ```
+		if p.flags&EXTENSION_FENCED_CODE != 0 {
+			if i := p.fencedCodeBlock(out, data, true); i > 0 {
+				data = data[i:]
+				continue
+			}
+		}
+
+		// horizontal rule:
+		//
+		// ------
+		// or
+		// ******
+		// or
+		// ______
+		if p.isHRule(data) {
+			p.r.HRule(out)
+			var i int
+			for i = 0; data[i] != '\n'; i++ {
+			}
+			data = data[i:]
+			continue
+		}
+
+		// block quote:
+		//
+		// > A big quote I found somewhere
+		// > on the web
+		if p.quotePrefix(data) > 0 {
+			data = data[p.quote(out, data):]
+			continue
+		}
+
+		// table:
+		//
+		// Name  | Age | Phone
+		// ------|-----|---------
+		// Bob   | 31  | 555-1234
+		// Alice | 27  | 555-4321
+		if p.flags&EXTENSION_TABLES != 0 {
+			if i := p.table(out, data); i > 0 {
+				data = data[i:]
+				continue
+			}
+		}
+
+		// an itemized/unordered list:
+		//
+		// * Item 1
+		// * Item 2
+		//
+		// also works with + or -
+		if p.uliPrefix(data) > 0 {
+			data = data[p.list(out, data, 0):]
+			continue
+		}
+
+		// a numbered/ordered list:
+		//
+		// 1. Item 1
+		// 2. Item 2
+		if p.oliPrefix(data) > 0 {
+			data = data[p.list(out, data, LIST_TYPE_ORDERED):]
+			continue
+		}
+
+		// definition lists:
+		//
+		// Term 1
+		// :   Definition a
+		// :   Definition b
+		//
+		// Term 2
+		// :   Definition c
+		if p.flags&EXTENSION_DEFINITION_LISTS != 0 {
+			if p.dliPrefix(data) > 0 {
+				data = data[p.list(out, data, LIST_TYPE_DEFINITION):]
+				continue
+			}
+		}
+
+		// anything else must look like a normal paragraph
+		// note: this finds underlined headers, too
+		data = data[p.paragraph(out, data):]
+	}
+
+	p.nesting--
+}
+
+func (p *parser) isPrefixHeader(data []byte) bool {
+	if data[0] != '#' {
+		return false
+	}
+
+	if p.flags&EXTENSION_SPACE_HEADERS != 0 {
+		level := 0
+		for level < 6 && data[level] == '#' {
+			level++
+		}
+		if data[level] != ' ' {
+			return false
+		}
+	}
+	return true
+}
+
+func (p *parser) prefixHeader(out *bytes.Buffer, data []byte) int {
+	level := 0
+	for level < 6 && data[level] == '#' {
+		level++
+	}
+	i := skipChar(data, level, ' ')
+	end := skipUntilChar(data, i, '\n')
+	skip := end
+	id := ""
+	if p.flags&EXTENSION_HEADER_IDS != 0 {
+		j, k := 0, 0
+		// find start/end of header id
+		for j = i; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ {
+		}
+		for k = j + 1; k < end && data[k] != '}'; k++ {
+		}
+		// extract header id iff found
+		if j < end && k < end {
+			id = string(data[j+2 : k])
+			end = j
+			skip = k + 1
+			for end > 0 && data[end-1] == ' ' {
+				end--
+			}
+		}
+	}
+	for end > 0 && data[end-1] == '#' {
+		if isBackslashEscaped(data, end-1) {
+			break
+		}
+		end--
+	}
+	for end > 0 && data[end-1] == ' ' {
+		end--
+	}
+	if end > i {
+		if id == "" && p.flags&EXTENSION_AUTO_HEADER_IDS != 0 {
+			id = SanitizedAnchorName(string(data[i:end]))
+		}
+		work := func() bool {
+			p.inline(out, data[i:end])
+			return true
+		}
+		p.r.Header(out, work, level, id)
+	}
+	return skip
+}
+
+func (p *parser) isUnderlinedHeader(data []byte) int {
+	// test of level 1 header
+	if data[0] == '=' {
+		i := skipChar(data, 1, '=')
+		i = skipChar(data, i, ' ')
+		if data[i] == '\n' {
+			return 1
+		} else {
+			return 0
+		}
+	}
+
+	// test of level 2 header
+	if data[0] == '-' {
+		i := skipChar(data, 1, '-')
+		i = skipChar(data, i, ' ')
+		if data[i] == '\n' {
+			return 2
+		} else {
+			return 0
+		}
+	}
+
+	return 0
+}
+
+func (p *parser) titleBlock(out *bytes.Buffer, data []byte, doRender bool) int {
+	if data[0] != '%' {
+		return 0
+	}
+	splitData := bytes.Split(data, []byte("\n"))
+	var i int
+	for idx, b := range splitData {
+		if !bytes.HasPrefix(b, []byte("%")) {
+			i = idx // - 1
+			break
+		}
+	}
+
+	data = bytes.Join(splitData[0:i], []byte("\n"))
+	p.r.TitleBlock(out, data)
+
+	return len(data)
+}
+
+func (p *parser) html(out *bytes.Buffer, data []byte, doRender bool) int {
+	var i, j int
+
+	// identify the opening tag
+	if data[0] != '<' {
+		return 0
+	}
+	curtag, tagfound := p.htmlFindTag(data[1:])
+
+	// handle special cases
+	if !tagfound {
+		// check for an HTML comment
+		if size := p.htmlComment(out, data, doRender); size > 0 {
+			return size
+		}
+
+		// check for an <hr> tag
+		if size := p.htmlHr(out, data, doRender); size > 0 {
+			return size
+		}
+
+		// check for HTML CDATA
+		if size := p.htmlCDATA(out, data, doRender); size > 0 {
+			return size
+		}
+
+		// no special case recognized
+		return 0
+	}
+
+	// look for an unindented matching closing tag
+	// followed by a blank line
+	found := false
+	/*
+		closetag := []byte("\n</" + curtag + ">")
+		j = len(curtag) + 1
+		for !found {
+			// scan for a closing tag at the beginning of a line
+			if skip := bytes.Index(data[j:], closetag); skip >= 0 {
+				j += skip + len(closetag)
+			} else {
+				break
+			}
+
+			// see if it is the only thing on the line
+			if skip := p.isEmpty(data[j:]); skip > 0 {
+				// see if it is followed by a blank line/eof
+				j += skip
+				if j >= len(data) {
+					found = true
+					i = j
+				} else {
+					if skip := p.isEmpty(data[j:]); skip > 0 {
+						j += skip
+						found = true
+						i = j
+					}
+				}
+			}
+		}
+	*/
+
+	// if not found, try a second pass looking for indented match
+	// but not if tag is "ins" or "del" (following original Markdown.pl)
+	if !found && curtag != "ins" && curtag != "del" {
+		i = 1
+		for i < len(data) {
+			i++
+			for i < len(data) && !(data[i-1] == '<' && data[i] == '/') {
+				i++
+			}
+
+			if i+2+len(curtag) >= len(data) {
+				break
+			}
+
+			j = p.htmlFindEnd(curtag, data[i-1:])
+
+			if j > 0 {
+				i += j - 1
+				found = true
+				break
+			}
+		}
+	}
+
+	if !found {
+		return 0
+	}
+
+	// the end of the block has been found
+	if doRender {
+		// trim newlines
+		end := i
+		for end > 0 && data[end-1] == '\n' {
+			end--
+		}
+		p.r.BlockHtml(out, data[:end])
+	}
+
+	return i
+}
+
+func (p *parser) renderHTMLBlock(out *bytes.Buffer, data []byte, start int, doRender bool) int {
+	// html block needs to end with a blank line
+	if i := p.isEmpty(data[start:]); i > 0 {
+		size := start + i
+		if doRender {
+			// trim trailing newlines
+			end := size
+			for end > 0 && data[end-1] == '\n' {
+				end--
+			}
+			p.r.BlockHtml(out, data[:end])
+		}
+		return size
+	}
+	return 0
+}
+
+// HTML comment, lax form
+func (p *parser) htmlComment(out *bytes.Buffer, data []byte, doRender bool) int {
+	i := p.inlineHTMLComment(out, data)
+	return p.renderHTMLBlock(out, data, i, doRender)
+}
+
+// HTML CDATA section
+func (p *parser) htmlCDATA(out *bytes.Buffer, data []byte, doRender bool) int {
+	const cdataTag = "<![cdata["
+	const cdataTagLen = len(cdataTag)
+	if len(data) < cdataTagLen+1 {
+		return 0
+	}
+	if !bytes.Equal(bytes.ToLower(data[:cdataTagLen]), []byte(cdataTag)) {
+		return 0
+	}
+	i := cdataTagLen
+	// scan for the end of the CDATA section (]]>), across lines if necessary
+	for i < len(data) && !(data[i-2] == ']' && data[i-1] == ']' && data[i] == '>') {
+		i++
+	}
+	i++
+	// no end-of-CDATA marker
+	if i >= len(data) {
+		return 0
+	}
+	return p.renderHTMLBlock(out, data, i, doRender)
+}
+
+// HR, which is the only self-closing block tag considered
+func (p *parser) htmlHr(out *bytes.Buffer, data []byte, doRender bool) int {
+	if data[0] != '<' || (data[1] != 'h' && data[1] != 'H') || (data[2] != 'r' && data[2] != 'R') {
+		return 0
+	}
+	if data[3] != ' ' && data[3] != '/' && data[3] != '>' {
+		// not an <hr> tag after all; at least not a valid one
+		return 0
+	}
+
+	i := 3
+	for data[i] != '>' && data[i] != '\n' {
+		i++
+	}
+
+	if data[i] == '>' {
+		return p.renderHTMLBlock(out, data, i+1, doRender)
+	}
+
+	return 0
+}
+
+func (p *parser) htmlFindTag(data []byte) (string, bool) {
+	i := 0
+	for isalnum(data[i]) {
+		i++
+	}
+	key := string(data[:i])
+	if _, ok := blockTags[key]; ok {
+		return key, true
+	}
+	return "", false
+}
+
+func (p *parser) htmlFindEnd(tag string, data []byte) int {
+	// assume data[0] == '<' && data[1] == '/' already tested
+
+	// check if tag is a match
+	closetag := []byte("</" + tag + ">")
+	if !bytes.HasPrefix(data, closetag) {
+		return 0
+	}
+	i := len(closetag)
+
+	// check that the rest of the line is blank
+	skip := 0
+	if skip = p.isEmpty(data[i:]); skip == 0 {
+		return 0
+	}
+	i += skip
+	skip = 0
+
+	if i >= len(data) {
+		return i
+	}
+
+	if p.flags&EXTENSION_LAX_HTML_BLOCKS != 0 {
+		return i
+	}
+	if skip = p.isEmpty(data[i:]); skip == 0 {
+		// following line must be blank
+		return 0
+	}
+
+	return i + skip
+}
+
+func (*parser) isEmpty(data []byte) int {
+	// it is okay to call isEmpty on an empty buffer
+	if len(data) == 0 {
+		return 0
+	}
+
+	var i int
+	for i = 0; i < len(data) && data[i] != '\n'; i++ {
+		if data[i] != ' ' && data[i] != '\t' {
+			return 0
+		}
+	}
+	return i + 1
+}
+
+func (*parser) isHRule(data []byte) bool {
+	i := 0
+
+	// skip up to three spaces
+	for i < 3 && data[i] == ' ' {
+		i++
+	}
+
+	// look at the hrule char
+	if data[i] != '*' && data[i] != '-' && data[i] != '_' {
+		return false
+	}
+	c := data[i]
+
+	// the whole line must be the char or whitespace
+	n := 0
+	for data[i] != '\n' {
+		switch {
+		case data[i] == c:
+			n++
+		case data[i] != ' ':
+			return false
+		}
+		i++
+	}
+
+	return n >= 3
+}
+
+// isFenceLine checks if there's a fence line (e.g., ``` or ``` go) at the beginning of data,
+// and returns the end index if so, or 0 otherwise. It also returns the marker found.
+// If info is not nil, it gets set to the syntax specified in the fence line.
+// A final newline is mandatory to recognize the fence line, unless newlineOptional is true.
+func isFenceLine(data []byte, info *string, oldmarker string, newlineOptional bool) (end int, marker string) {
+	i, size := 0, 0
+
+	// skip up to three spaces
+	for i < len(data) && i < 3 && data[i] == ' ' {
+		i++
+	}
+
+	// check for the marker characters: ~ or `
+	if i >= len(data) {
+		return 0, ""
+	}
+	if data[i] != '~' && data[i] != '`' {
+		return 0, ""
+	}
+
+	c := data[i]
+
+	// the whole line must be the same char or whitespace
+	for i < len(data) && data[i] == c {
+		size++
+		i++
+	}
+
+	// the marker char must occur at least 3 times
+	if size < 3 {
+		return 0, ""
+	}
+	marker = string(data[i-size : i])
+
+	// if this is the end marker, it must match the beginning marker
+	if oldmarker != "" && marker != oldmarker {
+		return 0, ""
+	}
+
+	// TODO(shurcooL): It's probably a good idea to simplify the 2 code paths here
+	// into one, always get the info string, and discard it if the caller doesn't care.
+	if info != nil {
+		infoLength := 0
+		i = skipChar(data, i, ' ')
+
+		if i >= len(data) {
+			if newlineOptional && i == len(data) {
+				return i, marker
+			}
+			return 0, ""
+		}
+
+		infoStart := i
+
+		if data[i] == '{' {
+			i++
+			infoStart++
+
+			for i < len(data) && data[i] != '}' && data[i] != '\n' {
+				infoLength++
+				i++
+			}
+
+			if i >= len(data) || data[i] != '}' {
+				return 0, ""
+			}
+
+			// strip all whitespace at the beginning and the end
+			// of the {} block
+			for infoLength > 0 && isspace(data[infoStart]) {
+				infoStart++
+				infoLength--
+			}
+
+			for infoLength > 0 && isspace(data[infoStart+infoLength-1]) {
+				infoLength--
+			}
+
+			i++
+		} else {
+			for i < len(data) && !isverticalspace(data[i]) {
+				infoLength++
+				i++
+			}
+		}
+
+		*info = strings.TrimSpace(string(data[infoStart : infoStart+infoLength]))
+	}
+
+	i = skipChar(data, i, ' ')
+	if i >= len(data) || data[i] != '\n' {
+		if newlineOptional && i == len(data) {
+			return i, marker
+		}
+		return 0, ""
+	}
+
+	return i + 1, marker // Take newline into account.
+}
+
+// fencedCodeBlock returns the end index if data contains a fenced code block at the beginning,
+// or 0 otherwise. It writes to out if doRender is true, otherwise it has no side effects.
+// If doRender is true, a final newline is mandatory to recognize the fenced code block.
+func (p *parser) fencedCodeBlock(out *bytes.Buffer, data []byte, doRender bool) int {
+	var infoString string
+	beg, marker := isFenceLine(data, &infoString, "", false)
+	if beg == 0 || beg >= len(data) {
+		return 0
+	}
+
+	var work bytes.Buffer
+
+	for {
+		// safe to assume beg < len(data)
+
+		// check for the end of the code block
+		newlineOptional := !doRender
+		fenceEnd, _ := isFenceLine(data[beg:], nil, marker, newlineOptional)
+		if fenceEnd != 0 {
+			beg += fenceEnd
+			break
+		}
+
+		// copy the current line
+		end := skipUntilChar(data, beg, '\n') + 1
+
+		// did we reach the end of the buffer without a closing marker?
+		if end >= len(data) {
+			return 0
+		}
+
+		// verbatim copy to the working buffer
+		if doRender {
+			work.Write(data[beg:end])
+		}
+		beg = end
+	}
+
+	if doRender {
+		p.r.BlockCode(out, work.Bytes(), infoString)
+	}
+
+	return beg
+}
+
+func (p *parser) table(out *bytes.Buffer, data []byte) int {
+	var header bytes.Buffer
+	i, columns := p.tableHeader(&header, data)
+	if i == 0 {
+		return 0
+	}
+
+	var body bytes.Buffer
+
+	for i < len(data) {
+		pipes, rowStart := 0, i
+		for ; data[i] != '\n'; i++ {
+			if data[i] == '|' {
+				pipes++
+			}
+		}
+
+		if pipes == 0 {
+			i = rowStart
+			break
+		}
+
+		// include the newline in data sent to tableRow
+		i++
+		p.tableRow(&body, data[rowStart:i], columns, false)
+	}
+
+	p.r.Table(out, header.Bytes(), body.Bytes(), columns)
+
+	return i
+}
+
+// check if the specified position is preceded by an odd number of backslashes
+func isBackslashEscaped(data []byte, i int) bool {
+	backslashes := 0
+	for i-backslashes-1 >= 0 && data[i-backslashes-1] == '\\' {
+		backslashes++
+	}
+	return backslashes&1 == 1
+}
+
+func (p *parser) tableHeader(out *bytes.Buffer, data []byte) (size int, columns []int) {
+	i := 0
+	colCount := 1
+	for i = 0; data[i] != '\n'; i++ {
+		if data[i] == '|' && !isBackslashEscaped(data, i) {
+			colCount++
+		}
+	}
+
+	// doesn't look like a table header
+	if colCount == 1 {
+		return
+	}
+
+	// include the newline in the data sent to tableRow
+	header := data[:i+1]
+
+	// column count ignores pipes at beginning or end of line
+	if data[0] == '|' {
+		colCount--
+	}
+	if i > 2 && data[i-1] == '|' && !isBackslashEscaped(data, i-1) {
+		colCount--
+	}
+
+	columns = make([]int, colCount)
+
+	// move on to the header underline
+	i++
+	if i >= len(data) {
+		return
+	}
+
+	if data[i] == '|' && !isBackslashEscaped(data, i) {
+		i++
+	}
+	i = skipChar(data, i, ' ')
+
+	// each column header is of form: / *:?-+:? *|/ with # dashes + # colons >= 3
+	// and trailing | optional on last column
+	col := 0
+	for data[i] != '\n' {
+		dashes := 0
+
+		if data[i] == ':' {
+			i++
+			columns[col] |= TABLE_ALIGNMENT_LEFT
+			dashes++
+		}
+		for data[i] == '-' {
+			i++
+			dashes++
+		}
+		if data[i] == ':' {
+			i++
+			columns[col] |= TABLE_ALIGNMENT_RIGHT
+			dashes++
+		}
+		for data[i] == ' ' {
+			i++
+		}
+
+		// end of column test is messy
+		switch {
+		case dashes < 3:
+			// not a valid column
+			return
+
+		case data[i] == '|' && !isBackslashEscaped(data, i):
+			// marker found, now skip past trailing whitespace
+			col++
+			i++
+			for data[i] == ' ' {
+				i++
+			}
+
+			// trailing junk found after last column
+			if col >= colCount && data[i] != '\n' {
+				return
+			}
+
+		case (data[i] != '|' || isBackslashEscaped(data, i)) && col+1 < colCount:
+			// something else found where marker was required
+			return
+
+		case data[i] == '\n':
+			// marker is optional for the last column
+			col++
+
+		default:
+			// trailing junk found after last column
+			return
+		}
+	}
+	if col != colCount {
+		return
+	}
+
+	p.tableRow(out, header, columns, true)
+	size = i + 1
+	return
+}
+
+func (p *parser) tableRow(out *bytes.Buffer, data []byte, columns []int, header bool) {
+	i, col := 0, 0
+	var rowWork bytes.Buffer
+
+	if data[i] == '|' && !isBackslashEscaped(data, i) {
+		i++
+	}
+
+	for col = 0; col < len(columns) && i < len(data); col++ {
+		for data[i] == ' ' {
+			i++
+		}
+
+		cellStart := i
+
+		for (data[i] != '|' || isBackslashEscaped(data, i)) && data[i] != '\n' {
+			i++
+		}
+
+		cellEnd := i
+
+		// skip the end-of-cell marker, possibly taking us past end of buffer
+		i++
+
+		for cellEnd > cellStart && data[cellEnd-1] == ' ' {
+			cellEnd--
+		}
+
+		var cellWork bytes.Buffer
+		p.inline(&cellWork, data[cellStart:cellEnd])
+
+		if header {
+			p.r.TableHeaderCell(&rowWork, cellWork.Bytes(), columns[col])
+		} else {
+			p.r.TableCell(&rowWork, cellWork.Bytes(), columns[col])
+		}
+	}
+
+	// pad it out with empty columns to get the right number
+	for ; col < len(columns); col++ {
+		if header {
+			p.r.TableHeaderCell(&rowWork, nil, columns[col])
+		} else {
+			p.r.TableCell(&rowWork, nil, columns[col])
+		}
+	}
+
+	// silently ignore rows with too many cells
+
+	p.r.TableRow(out, rowWork.Bytes())
+}
+
+// returns blockquote prefix length
+func (p *parser) quotePrefix(data []byte) int {
+	i := 0
+	for i < 3 && data[i] == ' ' {
+		i++
+	}
+	if data[i] == '>' {
+		if data[i+1] == ' ' {
+			return i + 2
+		}
+		return i + 1
+	}
+	return 0
+}
+
+// blockquote ends with at least one blank line
+// followed by something without a blockquote prefix
+func (p *parser) terminateBlockquote(data []byte, beg, end int) bool {
+	if p.isEmpty(data[beg:]) <= 0 {
+		return false
+	}
+	if end >= len(data) {
+		return true
+	}
+	return p.quotePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0
+}
+
+// parse a blockquote fragment
+func (p *parser) quote(out *bytes.Buffer, data []byte) int {
+	var raw bytes.Buffer
+	beg, end := 0, 0
+	for beg < len(data) {
+		end = beg
+		// Step over whole lines, collecting them. While doing that, check for
+		// fenced code and, if one is found, incorporate it as a whole,
+		// regardless of any contents inside it
+		for data[end] != '\n' {
+			if p.flags&EXTENSION_FENCED_CODE != 0 {
+				if i := p.fencedCodeBlock(out, data[end:], false); i > 0 {
+					// -1 to compensate for the extra end++ after the loop:
+					end += i - 1
+					break
+				}
+			}
+			end++
+		}
+		end++
+
+		if pre := p.quotePrefix(data[beg:]); pre > 0 {
+			// skip the prefix
+			beg += pre
+		} else if p.terminateBlockquote(data, beg, end) {
+			break
+		}
+
+		// this line is part of the blockquote
+		raw.Write(data[beg:end])
+		beg = end
+	}
+
+	var cooked bytes.Buffer
+	p.block(&cooked, raw.Bytes())
+	p.r.BlockQuote(out, cooked.Bytes())
+	return end
+}
+
+// returns prefix length for block code
+func (p *parser) codePrefix(data []byte) int {
+	if data[0] == ' ' && data[1] == ' ' && data[2] == ' ' && data[3] == ' ' {
+		return 4
+	}
+	return 0
+}
+
+func (p *parser) code(out *bytes.Buffer, data []byte) int {
+	var work bytes.Buffer
+
+	i := 0
+	for i < len(data) {
+		beg := i
+		for data[i] != '\n' {
+			i++
+		}
+		i++
+
+		blankline := p.isEmpty(data[beg:i]) > 0
+		if pre := p.codePrefix(data[beg:i]); pre > 0 {
+			beg += pre
+		} else if !blankline {
+			// non-empty, non-prefixed line breaks the pre
+			i = beg
+			break
+		}
+
+		// verbatim copy to the working buffer
+		if blankline {
+			work.WriteByte('\n')
+		} else {
+			work.Write(data[beg:i])
+		}
+	}
+
+	// trim all the \n off the end of work
+	workbytes := work.Bytes()
+	eol := len(workbytes)
+	for eol > 0 && workbytes[eol-1] == '\n' {
+		eol--
+	}
+	if eol != len(workbytes) {
+		work.Truncate(eol)
+	}
+
+	work.WriteByte('\n')
+
+	p.r.BlockCode(out, work.Bytes(), "")
+
+	return i
+}
+
+// returns unordered list item prefix
+func (p *parser) uliPrefix(data []byte) int {
+	i := 0
+
+	// start with up to 3 spaces
+	for i < 3 && data[i] == ' ' {
+		i++
+	}
+
+	// need a *, +, or - followed by a space
+	if (data[i] != '*' && data[i] != '+' && data[i] != '-') ||
+		data[i+1] != ' ' {
+		return 0
+	}
+	return i + 2
+}
+
+// returns ordered list item prefix
+func (p *parser) oliPrefix(data []byte) int {
+	i := 0
+
+	// start with up to 3 spaces
+	for i < 3 && data[i] == ' ' {
+		i++
+	}
+
+	// count the digits
+	start := i
+	for data[i] >= '0' && data[i] <= '9' {
+		i++
+	}
+
+	// we need >= 1 digits followed by a dot and a space
+	if start == i || data[i] != '.' || data[i+1] != ' ' {
+		return 0
+	}
+	return i + 2
+}
+
+// returns definition list item prefix
+func (p *parser) dliPrefix(data []byte) int {
+	i := 0
+
+	// need a ':' followed by a space
+	if data[i] != ':' || data[i+1] != ' ' {
+		return 0
+	}
+	for data[i] == ' ' {
+		i++
+	}
+	return i + 2
+}
+
+// parse ordered or unordered list block
+func (p *parser) list(out *bytes.Buffer, data []byte, flags int) int {
+	i := 0
+	flags |= LIST_ITEM_BEGINNING_OF_LIST
+	work := func() bool {
+		for i < len(data) {
+			skip := p.listItem(out, data[i:], &flags)
+			i += skip
+
+			if skip == 0 || flags&LIST_ITEM_END_OF_LIST != 0 {
+				break
+			}
+			flags &= ^LIST_ITEM_BEGINNING_OF_LIST
+		}
+		return true
+	}
+
+	p.r.List(out, work, flags)
+	return i
+}
+
+// Parse a single list item.
+// Assumes initial prefix is already removed if this is a sublist.
+func (p *parser) listItem(out *bytes.Buffer, data []byte, flags *int) int {
+	// keep track of the indentation of the first line
+	itemIndent := 0
+	for itemIndent < 3 && data[itemIndent] == ' ' {
+		itemIndent++
+	}
+
+	i := p.uliPrefix(data)
+	if i == 0 {
+		i = p.oliPrefix(data)
+	}
+	if i == 0 {
+		i = p.dliPrefix(data)
+		// reset definition term flag
+		if i > 0 {
+			*flags &= ^LIST_TYPE_TERM
+		}
+	}
+	if i == 0 {
+		// if in a definition list, set the term flag and continue
+		if *flags&LIST_TYPE_DEFINITION != 0 {
+			*flags |= LIST_TYPE_TERM
+		} else {
+			return 0
+		}
+	}
+
+	// skip leading whitespace on first line
+	for data[i] == ' ' {
+		i++
+	}
+
+	// find the end of the line
+	line := i
+	for i > 0 && data[i-1] != '\n' {
+		i++
+	}
+
+	// get working buffer
+	var raw bytes.Buffer
+
+	// put the first line into the working buffer
+	raw.Write(data[line:i])
+	line = i
+
+	// process the following lines
+	containsBlankLine := false
+	sublist := 0
+	codeBlockMarker := ""
+
+gatherlines:
+	for line < len(data) {
+		i++
+
+		// find the end of this line
+		for data[i-1] != '\n' {
+			i++
+		}
+
+		// if it is an empty line, guess that it is part of this item
+		// and move on to the next line
+		if p.isEmpty(data[line:i]) > 0 {
+			containsBlankLine = true
+			raw.Write(data[line:i])
+			line = i
+			continue
+		}
+
+		// calculate the indentation
+		indent := 0
+		for indent < 4 && line+indent < i && data[line+indent] == ' ' {
+			indent++
+		}
+
+		chunk := data[line+indent : i]
+
+		if p.flags&EXTENSION_FENCED_CODE != 0 {
+			// determine if in or out of codeblock
+			// if in codeblock, ignore normal list processing
+			_, marker := isFenceLine(chunk, nil, codeBlockMarker, false)
+			if marker != "" {
+				if codeBlockMarker == "" {
+					// start of codeblock
+					codeBlockMarker = marker
+				} else {
+					// end of codeblock.
+					*flags |= LIST_ITEM_CONTAINS_BLOCK
+					codeBlockMarker = ""
+				}
+			}
+			// we are in a codeblock, write line, and continue
+			if codeBlockMarker != "" || marker != "" {
+				raw.Write(data[line+indent : i])
+				line = i
+				continue gatherlines
+			}
+		}
+
+		// evaluate how this line fits in
+		switch {
+		// is this a nested list item?
+		case (p.uliPrefix(chunk) > 0 && !p.isHRule(chunk)) ||
+			p.oliPrefix(chunk) > 0 ||
+			p.dliPrefix(chunk) > 0:
+
+			if containsBlankLine {
+				// end the list if the type changed after a blank line
+				if indent <= itemIndent &&
+					((*flags&LIST_TYPE_ORDERED != 0 && p.uliPrefix(chunk) > 0) ||
+						(*flags&LIST_TYPE_ORDERED == 0 && p.oliPrefix(chunk) > 0)) {
+
+					*flags |= LIST_ITEM_END_OF_LIST
+					break gatherlines
+				}
+				*flags |= LIST_ITEM_CONTAINS_BLOCK
+			}
+
+			// to be a nested list, it must be indented more
+			// if not, it is the next item in the same list
+			if indent <= itemIndent {
+				break gatherlines
+			}
+
+			// is this the first item in the nested list?
+			if sublist == 0 {
+				sublist = raw.Len()
+			}
+
+		// is this a nested prefix header?
+		case p.isPrefixHeader(chunk):
+			// if the header is not indented, it is not nested in the list
+			// and thus ends the list
+			if containsBlankLine && indent < 4 {
+				*flags |= LIST_ITEM_END_OF_LIST
+				break gatherlines
+			}
+			*flags |= LIST_ITEM_CONTAINS_BLOCK
+
+		// anything following an empty line is only part
+		// of this item if it is indented 4 spaces
+		// (regardless of the indentation of the beginning of the item)
+		case containsBlankLine && indent < 4:
+			if *flags&LIST_TYPE_DEFINITION != 0 && i < len(data)-1 {
+				// is the next item still a part of this list?
+				next := i
+				for data[next] != '\n' {
+					next++
+				}
+				for next < len(data)-1 && data[next] == '\n' {
+					next++
+				}
+				if i < len(data)-1 && data[i] != ':' && data[next] != ':' {
+					*flags |= LIST_ITEM_END_OF_LIST
+				}
+			} else {
+				*flags |= LIST_ITEM_END_OF_LIST
+			}
+			break gatherlines
+
+		// a blank line means this should be parsed as a block
+		case containsBlankLine:
+			*flags |= LIST_ITEM_CONTAINS_BLOCK
+		}
+
+		containsBlankLine = false
+
+		// add the line into the working buffer without prefix
+		raw.Write(data[line+indent : i])
+
+		line = i
+	}
+
+	// If reached end of data, the Renderer.ListItem call we're going to make below
+	// is definitely the last in the list.
+	if line >= len(data) {
+		*flags |= LIST_ITEM_END_OF_LIST
+	}
+
+	rawBytes := raw.Bytes()
+
+	// render the contents of the list item
+	var cooked bytes.Buffer
+	if *flags&LIST_ITEM_CONTAINS_BLOCK != 0 && *flags&LIST_TYPE_TERM == 0 {
+		// intermediate render of block item, except for definition term
+		if sublist > 0 {
+			p.block(&cooked, rawBytes[:sublist])
+			p.block(&cooked, rawBytes[sublist:])
+		} else {
+			p.block(&cooked, rawBytes)
+		}
+	} else {
+		// intermediate render of inline item
+		if sublist > 0 {
+			p.inline(&cooked, rawBytes[:sublist])
+			p.block(&cooked, rawBytes[sublist:])
+		} else {
+			p.inline(&cooked, rawBytes)
+		}
+	}
+
+	// render the actual list item
+	cookedBytes := cooked.Bytes()
+	parsedEnd := len(cookedBytes)
+
+	// strip trailing newlines
+	for parsedEnd > 0 && cookedBytes[parsedEnd-1] == '\n' {
+		parsedEnd--
+	}
+	p.r.ListItem(out, cookedBytes[:parsedEnd], *flags)
+
+	return line
+}
+
+// render a single paragraph that has already been parsed out
+func (p *parser) renderParagraph(out *bytes.Buffer, data []byte) {
+	if len(data) == 0 {
+		return
+	}
+
+	// trim leading spaces
+	beg := 0
+	for data[beg] == ' ' {
+		beg++
+	}
+
+	// trim trailing newline
+	end := len(data) - 1
+
+	// trim trailing spaces
+	for end > beg && data[end-1] == ' ' {
+		end--
+	}
+
+	work := func() bool {
+		p.inline(out, data[beg:end])
+		return true
+	}
+	p.r.Paragraph(out, work)
+}
+
+func (p *parser) paragraph(out *bytes.Buffer, data []byte) int {
+	// prev: index of 1st char of previous line
+	// line: index of 1st char of current line
+	// i: index of cursor/end of current line
+	var prev, line, i int
+
+	// keep going until we find something to mark the end of the paragraph
+	for i < len(data) {
+		// mark the beginning of the current line
+		prev = line
+		current := data[i:]
+		line = i
+
+		// did we find a blank line marking the end of the paragraph?
+		if n := p.isEmpty(current); n > 0 {
+			// is this blank line followed by a definition list item?
+			if p.flags&EXTENSION_DEFINITION_LISTS != 0 {
+				if i < len(data)-1 && data[i+1] == ':' {
+					return p.list(out, data[prev:], LIST_TYPE_DEFINITION)
+				}
+			}
+
+			p.renderParagraph(out, data[:i])
+			return i + n
+		}
+
+		// an underline under some text marks a header, so our paragraph ended on prev line
+		if i > 0 {
+			if level := p.isUnderlinedHeader(current); level > 0 {
+				// render the paragraph
+				p.renderParagraph(out, data[:prev])
+
+				// ignore leading and trailing whitespace
+				eol := i - 1
+				for prev < eol && data[prev] == ' ' {
+					prev++
+				}
+				for eol > prev && data[eol-1] == ' ' {
+					eol--
+				}
+
+				// render the header
+				// this ugly double closure avoids forcing variables onto the heap
+				work := func(o *bytes.Buffer, pp *parser, d []byte) func() bool {
+					return func() bool {
+						pp.inline(o, d)
+						return true
+					}
+				}(out, p, data[prev:eol])
+
+				id := ""
+				if p.flags&EXTENSION_AUTO_HEADER_IDS != 0 {
+					id = SanitizedAnchorName(string(data[prev:eol]))
+				}
+
+				p.r.Header(out, work, level, id)
+
+				// find the end of the underline
+				for data[i] != '\n' {
+					i++
+				}
+				return i
+			}
+		}
+
+		// if the next line starts a block of HTML, then the paragraph ends here
+		if p.flags&EXTENSION_LAX_HTML_BLOCKS != 0 {
+			if data[i] == '<' && p.html(out, current, false) > 0 {
+				// rewind to before the HTML block
+				p.renderParagraph(out, data[:i])
+				return i
+			}
+		}
+
+		// if there's a prefixed header or a horizontal rule after this, paragraph is over
+		if p.isPrefixHeader(current) || p.isHRule(current) {
+			p.renderParagraph(out, data[:i])
+			return i
+		}
+
+		// if there's a fenced code block, paragraph is over
+		if p.flags&EXTENSION_FENCED_CODE != 0 {
+			if p.fencedCodeBlock(out, current, false) > 0 {
+				p.renderParagraph(out, data[:i])
+				return i
+			}
+		}
+
+		// if there's a definition list item, prev line is a definition term
+		if p.flags&EXTENSION_DEFINITION_LISTS != 0 {
+			if p.dliPrefix(current) != 0 {
+				return p.list(out, data[prev:], LIST_TYPE_DEFINITION)
+			}
+		}
+
+		// if there's a list after this, paragraph is over
+		if p.flags&EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK != 0 {
+			if p.uliPrefix(current) != 0 ||
+				p.oliPrefix(current) != 0 ||
+				p.quotePrefix(current) != 0 ||
+				p.codePrefix(current) != 0 {
+				p.renderParagraph(out, data[:i])
+				return i
+			}
+		}
+
+		// otherwise, scan to the beginning of the next line
+		for data[i] != '\n' {
+			i++
+		}
+		i++
+	}
+
+	p.renderParagraph(out, data[:i])
+	return i
+}
+
+// SanitizedAnchorName returns a sanitized anchor name for the given text.
+//
+// It implements the algorithm specified in the package comment.
+func SanitizedAnchorName(text string) string {
+	var anchorName []rune
+	futureDash := false
+	for _, r := range text {
+		switch {
+		case unicode.IsLetter(r) || unicode.IsNumber(r):
+			if futureDash && len(anchorName) > 0 {
+				anchorName = append(anchorName, '-')
+			}
+			futureDash = false
+			anchorName = append(anchorName, unicode.ToLower(r))
+		default:
+			futureDash = true
+		}
+	}
+	return string(anchorName)
+}
diff --git a/vendor/github.com/russross/blackfriday/doc.go b/vendor/github.com/russross/blackfriday/doc.go
new file mode 100644
index 00000000..9656c42a
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/doc.go
@@ -0,0 +1,32 @@
+// Package blackfriday is a Markdown processor.
+//
+// It translates plain text with simple formatting rules into HTML or LaTeX.
+//
+// Sanitized Anchor Names
+//
+// Blackfriday includes an algorithm for creating sanitized anchor names
+// corresponding to a given input text. This algorithm is used to create
+// anchors for headings when EXTENSION_AUTO_HEADER_IDS is enabled. The
+// algorithm is specified below, so that other packages can create
+// compatible anchor names and links to those anchors.
+//
+// The algorithm iterates over the input text, interpreted as UTF-8,
+// one Unicode code point (rune) at a time. All runes that are letters (category L)
+// or numbers (category N) are considered valid characters. They are mapped to
+// lower case, and included in the output. All other runes are considered
+// invalid characters. Invalid characters that precede the first valid character,
+// as well as invalid characters that follow the last valid character,
+// are dropped completely. All other sequences of invalid characters
+// between two valid characters are replaced with a single dash character '-'.
+//
+// SanitizedAnchorName exposes this functionality, and can be used to
+// create compatible links to the anchor names generated by blackfriday.
+// This algorithm is also implemented in a small standalone package at
+// github.com/shurcooL/sanitized_anchor_name. It can be useful for clients
+// that want a small package and don't need full functionality of blackfriday.
+package blackfriday
+
+// NOTE: Keep Sanitized Anchor Name algorithm in sync with package
+//       github.com/shurcooL/sanitized_anchor_name.
+//       Otherwise, users of sanitized_anchor_name will get anchor names
+//       that are incompatible with those generated by blackfriday.
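An illustrative sketch of the anchor-name algorithm described above; the exported SanitizedAnchorName implements exactly this mapping, and the expected outputs follow from the rules in the package comment:

	package main

	import (
		"fmt"

		"github.com/russross/blackfriday"
	)

	func main() {
		// Letters and digits are lower-cased and kept; every other run of runes
		// between them collapses to a single '-', and edges are trimmed.
		fmt.Println(blackfriday.SanitizedAnchorName("This is a header"))   // this-is-a-header
		fmt.Println(blackfriday.SanitizedAnchorName("  Header -- 2.0!  ")) // header-2-0
	}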
diff --git a/vendor/github.com/russross/blackfriday/go.mod b/vendor/github.com/russross/blackfriday/go.mod
new file mode 100644
index 00000000..b05561a0
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/go.mod
@@ -0,0 +1 @@
+module github.com/russross/blackfriday
diff --git a/vendor/github.com/russross/blackfriday/html.go b/vendor/github.com/russross/blackfriday/html.go
new file mode 100644
index 00000000..e0a6c69c
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/html.go
@@ -0,0 +1,938 @@
+//
+// Blackfriday Markdown Processor
+// Available at http://github.com/russross/blackfriday
+//
+// Copyright © 2011 Russ Ross <russ@russross.com>.
+// Distributed under the Simplified BSD License.
+// See README.md for details.
+//
+
+//
+//
+// HTML rendering backend
+//
+//
+
+package blackfriday
+
+import (
+	"bytes"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+// Html renderer configuration options.
+const (
+	HTML_SKIP_HTML                 = 1 << iota // skip preformatted HTML blocks
+	HTML_SKIP_STYLE                            // skip embedded <style> elements
+	HTML_SKIP_IMAGES                           // skip embedded images
+	HTML_SKIP_LINKS                            // skip all links
+	HTML_SAFELINK                              // only link to trusted protocols
+	HTML_NOFOLLOW_LINKS                        // only link with rel="nofollow"
+	HTML_NOREFERRER_LINKS                      // only link with rel="noreferrer"
+	HTML_HREF_TARGET_BLANK                     // add a blank target
+	HTML_TOC                                   // generate a table of contents
+	HTML_OMIT_CONTENTS                         // skip the main contents (for a standalone table of contents)
+	HTML_COMPLETE_PAGE                         // generate a complete HTML page
+	HTML_USE_XHTML                             // generate XHTML output instead of HTML
+	HTML_USE_SMARTYPANTS                       // enable smart punctuation substitutions
+	HTML_SMARTYPANTS_FRACTIONS                 // enable smart fractions (with HTML_USE_SMARTYPANTS)
+	HTML_SMARTYPANTS_DASHES                    // enable smart dashes (with HTML_USE_SMARTYPANTS)
+	HTML_SMARTYPANTS_LATEX_DASHES              // enable LaTeX-style dashes (with HTML_USE_SMARTYPANTS and HTML_SMARTYPANTS_DASHES)
+	HTML_SMARTYPANTS_ANGLED_QUOTES             // enable angled double quotes (with HTML_USE_SMARTYPANTS) for double quotes rendering
+	HTML_SMARTYPANTS_QUOTES_NBSP               // enable "French guillemets" (with HTML_USE_SMARTYPANTS)
+	HTML_FOOTNOTE_RETURN_LINKS                 // generate a link at the end of a footnote to return to the source
+)
+
+var (
+	alignments = []string{
+		"left",
+		"right",
+		"center",
+	}
+
+	// TODO: improve this regexp to catch all possible entities:
+	htmlEntity = regexp.MustCompile(`&[a-z]{2,5};`)
+)
+
+type HtmlRendererParameters struct {
+	// Prepend this text to each relative URL.
+	AbsolutePrefix string
+	// Add this text to each footnote anchor, to ensure uniqueness.
+	FootnoteAnchorPrefix string
+	// Show this text inside the <a> tag for a footnote return link, if the
+	// HTML_FOOTNOTE_RETURN_LINKS flag is enabled. If blank, the string
+	// <sup>[return]</sup> is used.
+	FootnoteReturnLinkContents string
+	// If set, add this text to the front of each Header ID, to ensure
+	// uniqueness.
+	HeaderIDPrefix string
+	// If set, add this text to the back of each Header ID, to ensure uniqueness.
+	HeaderIDSuffix string
+}
+
+// Html is a type that implements the Renderer interface for HTML output.
+//
+// Do not create this directly, instead use the HtmlRenderer function.
+type Html struct {
+	flags    int    // HTML_* options
+	closeTag string // how to end singleton tags: either " />" or ">"
+	title    string // document title
+	css      string // optional css file url (used with HTML_COMPLETE_PAGE)
+
+	parameters HtmlRendererParameters
+
+	// table of contents data
+	tocMarker    int
+	headerCount  int
+	currentLevel int
+	toc          *bytes.Buffer
+
+	// Track header IDs to prevent ID collision in a single generation.
+	headerIDs map[string]int
+
+	smartypants *smartypantsRenderer
+}
+
+const (
+	xhtmlClose = " />"
+	htmlClose  = ">"
+)
+
+// HtmlRenderer creates and configures an Html object, which
+// satisfies the Renderer interface.
+//
+// flags is a set of HTML_* options ORed together.
+// title is the title of the document, and css is a URL for the document's
+// stylesheet.
+// title and css are only used when HTML_COMPLETE_PAGE is selected.
+func HtmlRenderer(flags int, title string, css string) Renderer {
+	return HtmlRendererWithParameters(flags, title, css, HtmlRendererParameters{})
+}
+
+func HtmlRendererWithParameters(flags int, title string,
+	css string, renderParameters HtmlRendererParameters) Renderer {
+	// configure the rendering engine
+	closeTag := htmlClose
+	if flags&HTML_USE_XHTML != 0 {
+		closeTag = xhtmlClose
+	}
+
+	if renderParameters.FootnoteReturnLinkContents == "" {
+		renderParameters.FootnoteReturnLinkContents = `<sup>[return]</sup>`
+	}
+
+	return &Html{
+		flags:      flags,
+		closeTag:   closeTag,
+		title:      title,
+		css:        css,
+		parameters: renderParameters,
+
+		headerCount:  0,
+		currentLevel: 0,
+		toc:          new(bytes.Buffer),
+
+		headerIDs: make(map[string]int),
+
+		smartypants: smartypants(flags),
+	}
+}
+
+// Using if statements is a bit faster than a switch statement. As the compiler
+// improves, this should become unnecessary; it is only worthwhile because
+// attrEscape is the single largest CPU user in normal use.
+// Also tried using map, but that gave a ~3x slowdown.
+func escapeSingleChar(char byte) (string, bool) {
+	if char == '"' {
+		return "&quot;", true
+	}
+	if char == '&' {
+		return "&amp;", true
+	}
+	if char == '<' {
+		return "&lt;", true
+	}
+	if char == '>' {
+		return "&gt;", true
+	}
+	return "", false
+}
+
+func attrEscape(out *bytes.Buffer, src []byte) {
+	org := 0
+	for i, ch := range src {
+		if entity, ok := escapeSingleChar(ch); ok {
+			if i > org {
+				// copy all the normal characters since the last escape
+				out.Write(src[org:i])
+			}
+			org = i + 1
+			out.WriteString(entity)
+		}
+	}
+	if org < len(src) {
+		out.Write(src[org:])
+	}
+}
+
+func entityEscapeWithSkip(out *bytes.Buffer, src []byte, skipRanges [][]int) {
+	end := 0
+	for _, rang := range skipRanges {
+		attrEscape(out, src[end:rang[0]])
+		out.Write(src[rang[0]:rang[1]])
+		end = rang[1]
+	}
+	attrEscape(out, src[end:])
+}
+
+func (options *Html) GetFlags() int {
+	return options.flags
+}
+
+func (options *Html) TitleBlock(out *bytes.Buffer, text []byte) {
+	text = bytes.TrimPrefix(text, []byte("% "))
+	text = bytes.Replace(text, []byte("\n% "), []byte("\n"), -1)
+	out.WriteString("<h1 class=\"title\">")
+	out.Write(text)
+	out.WriteString("\n</h1>")
+}
+
+func (options *Html) Header(out *bytes.Buffer, text func() bool, level int, id string) {
+	marker := out.Len()
+	doubleSpace(out)
+
+	if id == "" && options.flags&HTML_TOC != 0 {
+		id = fmt.Sprintf("toc_%d", options.headerCount)
+	}
+
+	if id != "" {
+		id = options.ensureUniqueHeaderID(id)
+
+		if options.parameters.HeaderIDPrefix != "" {
+			id = options.parameters.HeaderIDPrefix + id
+		}
+
+		if options.parameters.HeaderIDSuffix != "" {
+			id = id + options.parameters.HeaderIDSuffix
+		}
+
+		out.WriteString(fmt.Sprintf("<h%d id=\"%s\">", level, id))
+	} else {
+		out.WriteString(fmt.Sprintf("<h%d>", level))
+	}
+
+	tocMarker := out.Len()
+	if !text() {
+		out.Truncate(marker)
+		return
+	}
+
+	// are we building a table of contents?
+	if options.flags&HTML_TOC != 0 {
+		options.TocHeaderWithAnchor(out.Bytes()[tocMarker:], level, id)
+	}
+
+	out.WriteString(fmt.Sprintf("</h%d>\n", level))
+}
+
+func (options *Html) BlockHtml(out *bytes.Buffer, text []byte) {
+	if options.flags&HTML_SKIP_HTML != 0 {
+		return
+	}
+
+	doubleSpace(out)
+	out.Write(text)
+	out.WriteByte('\n')
+}
+
+func (options *Html) HRule(out *bytes.Buffer) {
+	doubleSpace(out)
+	out.WriteString("<hr")
+	out.WriteString(options.closeTag)
+	out.WriteByte('\n')
+}
+
+func (options *Html) BlockCode(out *bytes.Buffer, text []byte, info string) {
+	doubleSpace(out)
+
+	endOfLang := strings.IndexAny(info, "\t ")
+	if endOfLang < 0 {
+		endOfLang = len(info)
+	}
+	lang := info[:endOfLang]
+	if len(lang) == 0 || lang == "." {
+		out.WriteString("<pre><code>")
+	} else {
+		out.WriteString("<pre><code class=\"language-")
+		attrEscape(out, []byte(lang))
+		out.WriteString("\">")
+	}
+	attrEscape(out, text)
+	out.WriteString("</code></pre>\n")
+}
+
+func (options *Html) BlockQuote(out *bytes.Buffer, text []byte) {
+	doubleSpace(out)
+	out.WriteString("<blockquote>\n")
+	out.Write(text)
+	out.WriteString("</blockquote>\n")
+}
+
+func (options *Html) Table(out *bytes.Buffer, header []byte, body []byte, columnData []int) {
+	doubleSpace(out)
+	out.WriteString("<table>\n<thead>\n")
+	out.Write(header)
+	out.WriteString("</thead>\n\n<tbody>\n")
+	out.Write(body)
+	out.WriteString("</tbody>\n</table>\n")
+}
+
+func (options *Html) TableRow(out *bytes.Buffer, text []byte) {
+	doubleSpace(out)
+	out.WriteString("<tr>\n")
+	out.Write(text)
+	out.WriteString("\n</tr>\n")
+}
+
+func (options *Html) TableHeaderCell(out *bytes.Buffer, text []byte, align int) {
+	doubleSpace(out)
+	switch align {
+	case TABLE_ALIGNMENT_LEFT:
+		out.WriteString("<th align=\"left\">")
+	case TABLE_ALIGNMENT_RIGHT:
+		out.WriteString("<th align=\"right\">")
+	case TABLE_ALIGNMENT_CENTER:
+		out.WriteString("<th align=\"center\">")
+	default:
+		out.WriteString("<th>")
+	}
+
+	out.Write(text)
+	out.WriteString("</th>")
+}
+
+func (options *Html) TableCell(out *bytes.Buffer, text []byte, align int) {
+	doubleSpace(out)
+	switch align {
+	case TABLE_ALIGNMENT_LEFT:
+		out.WriteString("<td align=\"left\">")
+	case TABLE_ALIGNMENT_RIGHT:
+		out.WriteString("<td align=\"right\">")
+	case TABLE_ALIGNMENT_CENTER:
+		out.WriteString("<td align=\"center\">")
+	default:
+		out.WriteString("<td>")
+	}
+
+	out.Write(text)
+	out.WriteString("</td>")
+}
+
+func (options *Html) Footnotes(out *bytes.Buffer, text func() bool) {
+	out.WriteString("<div class=\"footnotes\">\n")
+	options.HRule(out)
+	options.List(out, text, LIST_TYPE_ORDERED)
+	out.WriteString("</div>\n")
+}
+
+func (options *Html) FootnoteItem(out *bytes.Buffer, name, text []byte, flags int) {
+	if flags&LIST_ITEM_CONTAINS_BLOCK != 0 || flags&LIST_ITEM_BEGINNING_OF_LIST != 0 {
+		doubleSpace(out)
+	}
+	slug := slugify(name)
+	out.WriteString(`<li id="`)
+	out.WriteString(`fn:`)
+	out.WriteString(options.parameters.FootnoteAnchorPrefix)
+	out.Write(slug)
+	out.WriteString(`">`)
+	out.Write(text)
+	if options.flags&HTML_FOOTNOTE_RETURN_LINKS != 0 {
+		out.WriteString(` <a class="footnote-return" href="#`)
+		out.WriteString(`fnref:`)
+		out.WriteString(options.parameters.FootnoteAnchorPrefix)
+		out.Write(slug)
+		out.WriteString(`">`)
+		out.WriteString(options.parameters.FootnoteReturnLinkContents)
+		out.WriteString(`</a>`)
+	}
+	out.WriteString("</li>\n")
+}
+
+func (options *Html) List(out *bytes.Buffer, text func() bool, flags int) {
+	marker := out.Len()
+	doubleSpace(out)
+
+	if flags&LIST_TYPE_DEFINITION != 0 {
+		out.WriteString("<dl>")
+	} else if flags&LIST_TYPE_ORDERED != 0 {
+		out.WriteString("<ol>")
+	} else {
+		out.WriteString("<ul>")
+	}
+	if !text() {
+		out.Truncate(marker)
+		return
+	}
+	if flags&LIST_TYPE_DEFINITION != 0 {
+		out.WriteString("</dl>\n")
+	} else if flags&LIST_TYPE_ORDERED != 0 {
+		out.WriteString("</ol>\n")
+	} else {
+		out.WriteString("</ul>\n")
+	}
+}
+
+func (options *Html) ListItem(out *bytes.Buffer, text []byte, flags int) {
+	if (flags&LIST_ITEM_CONTAINS_BLOCK != 0 && flags&LIST_TYPE_DEFINITION == 0) ||
+		flags&LIST_ITEM_BEGINNING_OF_LIST != 0 {
+		doubleSpace(out)
+	}
+	if flags&LIST_TYPE_TERM != 0 {
+		out.WriteString("<dt>")
+	} else if flags&LIST_TYPE_DEFINITION != 0 {
+		out.WriteString("<dd>")
+	} else {
+		out.WriteString("<li>")
+	}
+	out.Write(text)
+	if flags&LIST_TYPE_TERM != 0 {
+		out.WriteString("</dt>\n")
+	} else if flags&LIST_TYPE_DEFINITION != 0 {
+		out.WriteString("</dd>\n")
+	} else {
+		out.WriteString("</li>\n")
+	}
+}
+
+func (options *Html) Paragraph(out *bytes.Buffer, text func() bool) {
+	marker := out.Len()
+	doubleSpace(out)
+
+	out.WriteString("<p>")
+	if !text() {
+		out.Truncate(marker)
+		return
+	}
+	out.WriteString("</p>\n")
+}
+
+func (options *Html) AutoLink(out *bytes.Buffer, link []byte, kind int) {
+	skipRanges := htmlEntity.FindAllIndex(link, -1)
+	if options.flags&HTML_SAFELINK != 0 && !isSafeLink(link) && kind != LINK_TYPE_EMAIL {
+		// mark it but don't link it if it is not a safe link: no smartypants
+		out.WriteString("<tt>")
+		entityEscapeWithSkip(out, link, skipRanges)
+		out.WriteString("</tt>")
+		return
+	}
+
+	out.WriteString("<a href=\"")
+	if kind == LINK_TYPE_EMAIL {
+		out.WriteString("mailto:")
+	} else {
+		options.maybeWriteAbsolutePrefix(out, link)
+	}
+
+	entityEscapeWithSkip(out, link, skipRanges)
+
+	var relAttrs []string
+	if options.flags&HTML_NOFOLLOW_LINKS != 0 && !isRelativeLink(link) {
+		relAttrs = append(relAttrs, "nofollow")
+	}
+	if options.flags&HTML_NOREFERRER_LINKS != 0 && !isRelativeLink(link) {
+		relAttrs = append(relAttrs, "noreferrer")
+	}
+	if len(relAttrs) > 0 {
+		out.WriteString(fmt.Sprintf("\" rel=\"%s", strings.Join(relAttrs, " ")))
+	}
+
+	// a blank target is only added to external links
+	if options.flags&HTML_HREF_TARGET_BLANK != 0 && !isRelativeLink(link) {
+		out.WriteString("\" target=\"_blank")
+	}
+
+	out.WriteString("\">")
+
+	// Pretty print: if we get an email address as
+	// an actual URI, e.g. `mailto:foo@bar.com`, we don't
+	// want to print the `mailto:` prefix
+	switch {
+	case bytes.HasPrefix(link, []byte("mailto://")):
+		attrEscape(out, link[len("mailto://"):])
+	case bytes.HasPrefix(link, []byte("mailto:")):
+		attrEscape(out, link[len("mailto:"):])
+	default:
+		entityEscapeWithSkip(out, link, skipRanges)
+	}
+
+	out.WriteString("</a>")
+}
+
+func (options *Html) CodeSpan(out *bytes.Buffer, text []byte) {
+	out.WriteString("<code>")
+	attrEscape(out, text)
+	out.WriteString("</code>")
+}
+
+func (options *Html) DoubleEmphasis(out *bytes.Buffer, text []byte) {
+	out.WriteString("<strong>")
+	out.Write(text)
+	out.WriteString("</strong>")
+}
+
+func (options *Html) Emphasis(out *bytes.Buffer, text []byte) {
+	if len(text) == 0 {
+		return
+	}
+	out.WriteString("<em>")
+	out.Write(text)
+	out.WriteString("</em>")
+}
+
+func (options *Html) maybeWriteAbsolutePrefix(out *bytes.Buffer, link []byte) {
+	if options.parameters.AbsolutePrefix != "" && isRelativeLink(link) && link[0] != '.' {
+		out.WriteString(options.parameters.AbsolutePrefix)
+		if link[0] != '/' {
+			out.WriteByte('/')
+		}
+	}
+}
+
+func (options *Html) Image(out *bytes.Buffer, link []byte, title []byte, alt []byte) {
+	if options.flags&HTML_SKIP_IMAGES != 0 {
+		return
+	}
+
+	out.WriteString("<img src=\"")
+	options.maybeWriteAbsolutePrefix(out, link)
+	attrEscape(out, link)
+	out.WriteString("\" alt=\"")
+	if len(alt) > 0 {
+		attrEscape(out, alt)
+	}
+	if len(title) > 0 {
+		out.WriteString("\" title=\"")
+		attrEscape(out, title)
+	}
+
+	out.WriteByte('"')
+	out.WriteString(options.closeTag)
+}
+
+func (options *Html) LineBreak(out *bytes.Buffer) {
+	out.WriteString("<br")
+	out.WriteString(options.closeTag)
+	out.WriteByte('\n')
+}
+
+func (options *Html) Link(out *bytes.Buffer, link []byte, title []byte, content []byte) {
+	if options.flags&HTML_SKIP_LINKS != 0 {
+		// write the link text out but don't link it, just mark it with typewriter font
+		out.WriteString("<tt>")
+		attrEscape(out, content)
+		out.WriteString("</tt>")
+		return
+	}
+
+	if options.flags&HTML_SAFELINK != 0 && !isSafeLink(link) {
+		// write the link text out but don't link it, just mark it with typewriter font
+		out.WriteString("<tt>")
+		attrEscape(out, content)
+		out.WriteString("</tt>")
+		return
+	}
+
+	out.WriteString("<a href=\"")
+	options.maybeWriteAbsolutePrefix(out, link)
+	attrEscape(out, link)
+	if len(title) > 0 {
+		out.WriteString("\" title=\"")
+		attrEscape(out, title)
+	}
+	var relAttrs []string
+	if options.flags&HTML_NOFOLLOW_LINKS != 0 && !isRelativeLink(link) {
+		relAttrs = append(relAttrs, "nofollow")
+	}
+	if options.flags&HTML_NOREFERRER_LINKS != 0 && !isRelativeLink(link) {
+		relAttrs = append(relAttrs, "noreferrer")
+	}
+	if len(relAttrs) > 0 {
+		out.WriteString(fmt.Sprintf("\" rel=\"%s", strings.Join(relAttrs, " ")))
+	}
+
+	// a blank target is only added to external links
+	if options.flags&HTML_HREF_TARGET_BLANK != 0 && !isRelativeLink(link) {
+		out.WriteString("\" target=\"_blank")
+	}
+
+	out.WriteString("\">")
+	out.Write(content)
+	out.WriteString("</a>")
+	return
+}
+
+func (options *Html) RawHtmlTag(out *bytes.Buffer, text []byte) {
+	if options.flags&HTML_SKIP_HTML != 0 {
+		return
+	}
+	if options.flags&HTML_SKIP_STYLE != 0 && isHtmlTag(text, "style") {
+		return
+	}
+	if options.flags&HTML_SKIP_LINKS != 0 && isHtmlTag(text, "a") {
+		return
+	}
+	if options.flags&HTML_SKIP_IMAGES != 0 && isHtmlTag(text, "img") {
+		return
+	}
+	out.Write(text)
+}
+
+func (options *Html) TripleEmphasis(out *bytes.Buffer, text []byte) {
+	out.WriteString("<strong><em>")
+	out.Write(text)
+	out.WriteString("</em></strong>")
+}
+
+func (options *Html) StrikeThrough(out *bytes.Buffer, text []byte) {
+	out.WriteString("<del>")
+	out.Write(text)
+	out.WriteString("</del>")
+}
+
+func (options *Html) FootnoteRef(out *bytes.Buffer, ref []byte, id int) {
+	slug := slugify(ref)
+	out.WriteString(`<sup class="footnote-ref" id="`)
+	out.WriteString(`fnref:`)
+	out.WriteString(options.parameters.FootnoteAnchorPrefix)
+	out.Write(slug)
+	out.WriteString(`"><a href="#`)
+	out.WriteString(`fn:`)
+	out.WriteString(options.parameters.FootnoteAnchorPrefix)
+	out.Write(slug)
+	out.WriteString(`">`)
+	out.WriteString(strconv.Itoa(id))
+	out.WriteString(`</a></sup>`)
+}
+
+func (options *Html) Entity(out *bytes.Buffer, entity []byte) {
+	out.Write(entity)
+}
+
+func (options *Html) NormalText(out *bytes.Buffer, text []byte) {
+	if options.flags&HTML_USE_SMARTYPANTS != 0 {
+		options.Smartypants(out, text)
+	} else {
+		attrEscape(out, text)
+	}
+}
+
+func (options *Html) Smartypants(out *bytes.Buffer, text []byte) {
+	smrt := smartypantsData{false, false}
+
+	// first do normal entity escaping
+	var escaped bytes.Buffer
+	attrEscape(&escaped, text)
+	text = escaped.Bytes()
+
+	mark := 0
+	for i := 0; i < len(text); i++ {
+		if action := options.smartypants[text[i]]; action != nil {
+			if i > mark {
+				out.Write(text[mark:i])
+			}
+
+			previousChar := byte(0)
+			if i > 0 {
+				previousChar = text[i-1]
+			}
+			i += action(out, &smrt, previousChar, text[i:])
+			mark = i + 1
+		}
+	}
+
+	if mark < len(text) {
+		out.Write(text[mark:])
+	}
+}
+
+func (options *Html) DocumentHeader(out *bytes.Buffer) {
+	if options.flags&HTML_COMPLETE_PAGE == 0 {
+		return
+	}
+
+	ending := ""
+	if options.flags&HTML_USE_XHTML != 0 {
+		out.WriteString("<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" ")
+		out.WriteString("\"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n")
+		out.WriteString("<html xmlns=\"http://www.w3.org/1999/xhtml\">\n")
+		ending = " /"
+	} else {
+		out.WriteString("<!DOCTYPE html>\n")
+		out.WriteString("<html>\n")
+	}
+	out.WriteString("<head>\n")
+	out.WriteString("  <title>")
+	options.NormalText(out, []byte(options.title))
+	out.WriteString("</title>\n")
+	out.WriteString("  <meta name=\"GENERATOR\" content=\"Blackfriday Markdown Processor v")
+	out.WriteString(VERSION)
+	out.WriteString("\"")
+	out.WriteString(ending)
+	out.WriteString(">\n")
+	out.WriteString("  <meta charset=\"utf-8\"")
+	out.WriteString(ending)
+	out.WriteString(">\n")
+	if options.css != "" {
+		out.WriteString("  <link rel=\"stylesheet\" type=\"text/css\" href=\"")
+		attrEscape(out, []byte(options.css))
+		out.WriteString("\"")
+		out.WriteString(ending)
+		out.WriteString(">\n")
+	}
+	out.WriteString("</head>\n")
+	out.WriteString("<body>\n")
+
+	options.tocMarker = out.Len()
+}
+
+func (options *Html) DocumentFooter(out *bytes.Buffer) {
+	// finalize and insert the table of contents
+	if options.flags&HTML_TOC != 0 {
+		options.TocFinalize()
+
+		// now we have to insert the table of contents into the document
+		var temp bytes.Buffer
+
+		// start by making a copy of everything after the document header
+		temp.Write(out.Bytes()[options.tocMarker:])
+
+		// now clear the copied material from the main output buffer
+		out.Truncate(options.tocMarker)
+
+		// corner case spacing issue
+		if options.flags&HTML_COMPLETE_PAGE != 0 {
+			out.WriteByte('\n')
+		}
+
+		// insert the table of contents
+		out.WriteString("<nav>\n")
+		out.Write(options.toc.Bytes())
+		out.WriteString("</nav>\n")
+
+		// corner case spacing issue
+		if options.flags&HTML_COMPLETE_PAGE == 0 && options.flags&HTML_OMIT_CONTENTS == 0 {
+			out.WriteByte('\n')
+		}
+
+		// write out everything that came after it
+		if options.flags&HTML_OMIT_CONTENTS == 0 {
+			out.Write(temp.Bytes())
+		}
+	}
+
+	if options.flags&HTML_COMPLETE_PAGE != 0 {
+		out.WriteString("\n</body>\n")
+		out.WriteString("</html>\n")
+	}
+
+}
+
+func (options *Html) TocHeaderWithAnchor(text []byte, level int, anchor string) {
+	for level > options.currentLevel {
+		switch {
+		case bytes.HasSuffix(options.toc.Bytes(), []byte("</li>\n")):
+			// this sublist can nest underneath a header
+			size := options.toc.Len()
+			options.toc.Truncate(size - len("</li>\n"))
+
+		case options.currentLevel > 0:
+			options.toc.WriteString("<li>")
+		}
+		if options.toc.Len() > 0 {
+			options.toc.WriteByte('\n')
+		}
+		options.toc.WriteString("<ul>\n")
+		options.currentLevel++
+	}
+
+	for level < options.currentLevel {
+		options.toc.WriteString("</ul>")
+		if options.currentLevel > 1 {
+			options.toc.WriteString("</li>\n")
+		}
+		options.currentLevel--
+	}
+
+	options.toc.WriteString("<li><a href=\"#")
+	if anchor != "" {
+		options.toc.WriteString(anchor)
+	} else {
+		options.toc.WriteString("toc_")
+		options.toc.WriteString(strconv.Itoa(options.headerCount))
+	}
+	options.toc.WriteString("\">")
+	options.headerCount++
+
+	options.toc.Write(text)
+
+	options.toc.WriteString("</a></li>\n")
+}
+
+func (options *Html) TocHeader(text []byte, level int) {
+	options.TocHeaderWithAnchor(text, level, "")
+}
+
+func (options *Html) TocFinalize() {
+	for options.currentLevel > 1 {
+		options.toc.WriteString("</ul></li>\n")
+		options.currentLevel--
+	}
+
+	if options.currentLevel > 0 {
+		options.toc.WriteString("</ul>\n")
+	}
+}
+
+func isHtmlTag(tag []byte, tagname string) bool {
+	found, _ := findHtmlTagPos(tag, tagname)
+	return found
+}
+
+// Look for a character, but ignore it when it is inside any kind of quotes,
+// since it might be JavaScript
+func skipUntilCharIgnoreQuotes(html []byte, start int, char byte) int {
+	inSingleQuote := false
+	inDoubleQuote := false
+	inGraveQuote := false
+	i := start
+	for i < len(html) {
+		switch {
+		case html[i] == char && !inSingleQuote && !inDoubleQuote && !inGraveQuote:
+			return i
+		case html[i] == '\'':
+			inSingleQuote = !inSingleQuote
+		case html[i] == '"':
+			inDoubleQuote = !inDoubleQuote
+		case html[i] == '`':
+			inGraveQuote = !inGraveQuote
+		}
+		i++
+	}
+	return start
+}
+
+func findHtmlTagPos(tag []byte, tagname string) (bool, int) {
+	i := 0
+	if i < len(tag) && tag[0] != '<' {
+		return false, -1
+	}
+	i++
+	i = skipSpace(tag, i)
+
+	if i < len(tag) && tag[i] == '/' {
+		i++
+	}
+
+	i = skipSpace(tag, i)
+	j := 0
+	for ; i < len(tag); i, j = i+1, j+1 {
+		if j >= len(tagname) {
+			break
+		}
+
+		if strings.ToLower(string(tag[i]))[0] != tagname[j] {
+			return false, -1
+		}
+	}
+
+	if i == len(tag) {
+		return false, -1
+	}
+
+	rightAngle := skipUntilCharIgnoreQuotes(tag, i, '>')
+	if rightAngle > i {
+		return true, rightAngle
+	}
+
+	return false, -1
+}
+
+func skipUntilChar(text []byte, start int, char byte) int {
+	i := start
+	for i < len(text) && text[i] != char {
+		i++
+	}
+	return i
+}
+
+func skipSpace(tag []byte, i int) int {
+	for i < len(tag) && isspace(tag[i]) {
+		i++
+	}
+	return i
+}
+
+func skipChar(data []byte, start int, char byte) int {
+	i := start
+	for i < len(data) && data[i] == char {
+		i++
+	}
+	return i
+}
+
+func doubleSpace(out *bytes.Buffer) {
+	if out.Len() > 0 {
+		out.WriteByte('\n')
+	}
+}
+
+func isRelativeLink(link []byte) (yes bool) {
+	// an anchor link begins with '#'
+	if link[0] == '#' {
+		return true
+	}
+
+	// a link beginning with '/' but not '//'; the latter may be a protocol-relative link
+	if len(link) >= 2 && link[0] == '/' && link[1] != '/' {
+		return true
+	}
+
+	// only the root '/'
+	if len(link) == 1 && link[0] == '/' {
+		return true
+	}
+
+	// current directory: begins with "./"
+	if bytes.HasPrefix(link, []byte("./")) {
+		return true
+	}
+
+	// parent directory: begins with "../"
+	if bytes.HasPrefix(link, []byte("../")) {
+		return true
+	}
+
+	return false
+}
+
+func (options *Html) ensureUniqueHeaderID(id string) string {
+	for count, found := options.headerIDs[id]; found; count, found = options.headerIDs[id] {
+		tmp := fmt.Sprintf("%s-%d", id, count+1)
+
+		if _, tmpFound := options.headerIDs[tmp]; !tmpFound {
+			options.headerIDs[id] = count + 1
+			id = tmp
+		} else {
+			id = id + "-1"
+		}
+	}
+
+	if _, found := options.headerIDs[id]; !found {
+		options.headerIDs[id] = 0
+	}
+
+	return id
+}
diff --git a/vendor/github.com/russross/blackfriday/inline.go b/vendor/github.com/russross/blackfriday/inline.go
new file mode 100644
index 00000000..4483b8f1
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/inline.go
@@ -0,0 +1,1154 @@
+//
+// Blackfriday Markdown Processor
+// Available at http://github.com/russross/blackfriday
+//
+// Copyright © 2011 Russ Ross <russ@russross.com>.
+// Distributed under the Simplified BSD License.
+// See README.md for details.
+//
+
+//
+// Functions to parse inline elements.
+//
+
+package blackfriday
+
+import (
+	"bytes"
+	"regexp"
+	"strconv"
+)
+
+var (
+	urlRe    = `((https?|ftp):\/\/|\/)[-A-Za-z0-9+&@#\/%?=~_|!:,.;\(\)]+`
+	anchorRe = regexp.MustCompile(`^(<a\shref="` + urlRe + `"(\stitle="[^"<>]+")?\s?>` + urlRe + `<\/a>)`)
+)
+
+// Functions to parse text within a block
+// Each function returns the number of chars taken care of
+// data is the complete block being rendered
+// offset is the number of valid chars before the current cursor
+
+func (p *parser) inline(out *bytes.Buffer, data []byte) {
+	// this is called recursively: enforce a maximum depth
+	if p.nesting >= p.maxNesting {
+		return
+	}
+	p.nesting++
+
+	i, end := 0, 0
+	for i < len(data) {
+		// copy inactive chars into the output
+		for end < len(data) && p.inlineCallback[data[end]] == nil {
+			end++
+		}
+
+		p.r.NormalText(out, data[i:end])
+
+		if end >= len(data) {
+			break
+		}
+		i = end
+
+		// call the trigger
+		handler := p.inlineCallback[data[end]]
+		if consumed := handler(p, out, data, i); consumed == 0 {
+			// no action from the callback; buffer the byte for later
+			end = i + 1
+		} else {
+			// skip past whatever the callback used
+			i += consumed
+			end = i
+		}
+	}
+
+	p.nesting--
+}
+
+// single and double emphasis parsing
+func emphasis(p *parser, out *bytes.Buffer, data []byte, offset int) int {
+	data = data[offset:]
+	c := data[0]
+	ret := 0
+
+	if len(data) > 2 && data[1] != c {
+		// whitespace cannot follow an opening emphasis;
+		// strikethrough only takes two characters '~~'
+		if c == '~' || isspace(data[1]) {
+			return 0
+		}
+		if ret = helperEmphasis(p, out, data[1:], c); ret == 0 {
+			return 0
+		}
+
+		return ret + 1
+	}
+
+	if len(data) > 3 && data[1] == c && data[2] != c {
+		if isspace(data[2]) {
+			return 0
+		}
+		if ret = helperDoubleEmphasis(p, out, data[2:], c); ret == 0 {
+			return 0
+		}
+
+		return ret + 2
+	}
+
+	if len(data) > 4 && data[1] == c && data[2] == c && data[3] != c {
+		if c == '~' || isspace(data[3]) {
+			return 0
+		}
+		if ret = helperTripleEmphasis(p, out, data, 3, c); ret == 0 {
+			return 0
+		}
+
+		return ret + 3
+	}
+
+	return 0
+}
+
+func codeSpan(p *parser, out *bytes.Buffer, data []byte, offset int) int {
+	data = data[offset:]
+
+	nb := 0
+
+	// count the number of backticks in the delimiter
+	for nb < len(data) && data[nb] == '`' {
+		nb++
+	}
+
+	// find the next delimiter
+	i, end := 0, 0
+	for end = nb; end < len(data) && i < nb; end++ {
+		if data[end] == '`' {
+			i++
+		} else {
+			i = 0
+		}
+	}
+
+	// no matching delimiter?
+	if i < nb && end >= len(data) {
+		return 0
+	}
+
+	// trim outside whitespace
+	fBegin := nb
+	for fBegin < end && data[fBegin] == ' ' {
+		fBegin++
+	}
+
+	fEnd := end - nb
+	for fEnd > fBegin && data[fEnd-1] == ' ' {
+		fEnd--
+	}
+
+	// render the code span
+	if fBegin != fEnd {
+		p.r.CodeSpan(out, data[fBegin:fEnd])
+	}
+
+	return end
+
+}
+
+// newline preceded by two spaces becomes <br>
+// newline without two spaces works when EXTENSION_HARD_LINE_BREAK is enabled
+func lineBreak(p *parser, out *bytes.Buffer, data []byte, offset int) int {
+	// remove trailing spaces from out
+	outBytes := out.Bytes()
+	end := len(outBytes)
+	eol := end
+	for eol > 0 && outBytes[eol-1] == ' ' {
+		eol--
+	}
+	out.Truncate(eol)
+
+	precededByTwoSpaces := offset >= 2 && data[offset-2] == ' ' && data[offset-1] == ' '
+	precededByBackslash := offset >= 1 && data[offset-1] == '\\' // see http://spec.commonmark.org/0.18/#example-527
+	precededByBackslash = precededByBackslash && p.flags&EXTENSION_BACKSLASH_LINE_BREAK != 0
+
+	if p.flags&EXTENSION_JOIN_LINES != 0 {
+		return 1
+	}
+
+	// should there be a hard line break here?
+	if p.flags&EXTENSION_HARD_LINE_BREAK == 0 && !precededByTwoSpaces && !precededByBackslash {
+		return 0
+	}
+
+	if precededByBackslash && eol > 0 {
+		out.Truncate(eol - 1)
+	}
+	p.r.LineBreak(out)
+	return 1
+}
+
+type linkType int
+
+const (
+	linkNormal linkType = iota
+	linkImg
+	linkDeferredFootnote
+	linkInlineFootnote
+)
+
+func isReferenceStyleLink(data []byte, pos int, t linkType) bool {
+	if t == linkDeferredFootnote {
+		return false
+	}
+	return pos < len(data)-1 && data[pos] == '[' && data[pos+1] != '^'
+}
+
+// '[': parse a link or an image or a footnote
+func link(p *parser, out *bytes.Buffer, data []byte, offset int) int {
+	// no links allowed inside regular links, footnote, and deferred footnotes
+	if p.insideLink && (offset > 0 && data[offset-1] == '[' || len(data)-1 > offset && data[offset+1] == '^') {
+		return 0
+	}
+
+	var t linkType
+	switch {
+	// special case: ![^text] == deferred footnote (that follows something with
+	// an exclamation point)
+	case p.flags&EXTENSION_FOOTNOTES != 0 && len(data)-1 > offset && data[offset+1] == '^':
+		t = linkDeferredFootnote
+	// ![alt] == image
+	case offset > 0 && data[offset-1] == '!':
+		t = linkImg
+	// ^[text] == inline footnote
+	// [^refId] == deferred footnote
+	case p.flags&EXTENSION_FOOTNOTES != 0:
+		if offset > 0 && data[offset-1] == '^' {
+			t = linkInlineFootnote
+		} else if len(data)-1 > offset && data[offset+1] == '^' {
+			t = linkDeferredFootnote
+		}
+	// [text] == regular link
+	default:
+		t = linkNormal
+	}
+
+	data = data[offset:]
+
+	var (
+		i                       = 1
+		noteId                  int
+		title, link, altContent []byte
+		textHasNl               = false
+	)
+
+	if t == linkDeferredFootnote {
+		i++
+	}
+
+	brace := 0
+
+	// look for the matching closing bracket
+	for level := 1; level > 0 && i < len(data); i++ {
+		switch {
+		case data[i] == '\n':
+			textHasNl = true
+
+		case data[i-1] == '\\':
+			continue
+
+		case data[i] == '[':
+			level++
+
+		case data[i] == ']':
+			level--
+			if level <= 0 {
+				i-- // compensate for extra i++ in for loop
+			}
+		}
+	}
+
+	if i >= len(data) {
+		return 0
+	}
+
+	txtE := i
+	i++
+
+	// skip any amount of whitespace or newline
+	// (this is much more lax than original markdown syntax)
+	for i < len(data) && isspace(data[i]) {
+		i++
+	}
+
+	switch {
+	// inline style link
+	case i < len(data) && data[i] == '(':
+		// skip initial whitespace
+		i++
+
+		for i < len(data) && isspace(data[i]) {
+			i++
+		}
+
+		linkB := i
+
+		// look for the link end: ' " ). New opening braces are tracked and
+		// taken into account; this may lead to overshooting and will probably
+		// require some fine-tuning.
+	findlinkend:
+		for i < len(data) {
+			switch {
+			case data[i] == '\\':
+				i += 2
+
+			case data[i] == '(':
+				brace++
+				i++
+
+			case data[i] == ')':
+				if brace <= 0 {
+					break findlinkend
+				}
+				brace--
+				i++
+
+			case data[i] == '\'' || data[i] == '"':
+				break findlinkend
+
+			default:
+				i++
+			}
+		}
+
+		if i >= len(data) {
+			return 0
+		}
+		linkE := i
+
+		// look for title end if present
+		titleB, titleE := 0, 0
+		if data[i] == '\'' || data[i] == '"' {
+			i++
+			titleB = i
+
+		findtitleend:
+			for i < len(data) {
+				switch {
+				case data[i] == '\\':
+					i += 2
+
+				case data[i] == ')':
+					break findtitleend
+
+				default:
+					i++
+				}
+			}
+
+			if i >= len(data) {
+				return 0
+			}
+
+			// skip whitespace after title
+			titleE = i - 1
+			for titleE > titleB && isspace(data[titleE]) {
+				titleE--
+			}
+
+			// check for closing quote presence
+			if data[titleE] != '\'' && data[titleE] != '"' {
+				titleB, titleE = 0, 0
+				linkE = i
+			}
+		}
+
+		// remove whitespace at the end of the link
+		for linkE > linkB && isspace(data[linkE-1]) {
+			linkE--
+		}
+
+		// remove optional angle brackets around the link
+		if data[linkB] == '<' {
+			linkB++
+		}
+		if data[linkE-1] == '>' {
+			linkE--
+		}
+
+		// build escaped link and title
+		if linkE > linkB {
+			link = data[linkB:linkE]
+		}
+
+		if titleE > titleB {
+			title = data[titleB:titleE]
+		}
+
+		i++
+
+	// reference style link
+	case isReferenceStyleLink(data, i, t):
+		var id []byte
+		altContentConsidered := false
+
+		// look for the id
+		i++
+		linkB := i
+		for i < len(data) && data[i] != ']' {
+			i++
+		}
+		if i >= len(data) {
+			return 0
+		}
+		linkE := i
+
+		// find the reference
+		if linkB == linkE {
+			if textHasNl {
+				var b bytes.Buffer
+
+				for j := 1; j < txtE; j++ {
+					switch {
+					case data[j] != '\n':
+						b.WriteByte(data[j])
+					case data[j-1] != ' ':
+						b.WriteByte(' ')
+					}
+				}
+
+				id = b.Bytes()
+			} else {
+				id = data[1:txtE]
+				altContentConsidered = true
+			}
+		} else {
+			id = data[linkB:linkE]
+		}
+
+		// find the reference with matching id
+		lr, ok := p.getRef(string(id))
+		if !ok {
+			return 0
+		}
+
+		// keep link and title from reference
+		link = lr.link
+		title = lr.title
+		if altContentConsidered {
+			altContent = lr.text
+		}
+		i++
+
+	// shortcut reference style link, or a deferred or inline footnote
+	default:
+		var id []byte
+
+		// craft the id
+		if textHasNl {
+			var b bytes.Buffer
+
+			for j := 1; j < txtE; j++ {
+				switch {
+				case data[j] != '\n':
+					b.WriteByte(data[j])
+				case data[j-1] != ' ':
+					b.WriteByte(' ')
+				}
+			}
+
+			id = b.Bytes()
+		} else {
+			if t == linkDeferredFootnote {
+				id = data[2:txtE] // get rid of the ^
+			} else {
+				id = data[1:txtE]
+			}
+		}
+
+		if t == linkInlineFootnote {
+			// create a new reference
+			noteId = len(p.notes) + 1
+
+			var fragment []byte
+			if len(id) > 0 {
+				if len(id) < 16 {
+					fragment = make([]byte, len(id))
+				} else {
+					fragment = make([]byte, 16)
+				}
+				copy(fragment, slugify(id))
+			} else {
+				fragment = append([]byte("footnote-"), []byte(strconv.Itoa(noteId))...)
+			}
+
+			ref := &reference{
+				noteId:   noteId,
+				hasBlock: false,
+				link:     fragment,
+				title:    id,
+			}
+
+			p.notes = append(p.notes, ref)
+			p.notesRecord[string(ref.link)] = struct{}{}
+
+			link = ref.link
+			title = ref.title
+		} else {
+			// find the reference with matching id
+			lr, ok := p.getRef(string(id))
+			if !ok {
+				return 0
+			}
+
+			if t == linkDeferredFootnote && !p.isFootnote(lr) {
+				lr.noteId = len(p.notes) + 1
+				p.notes = append(p.notes, lr)
+				p.notesRecord[string(lr.link)] = struct{}{}
+			}
+
+			// keep link and title from reference
+			link = lr.link
+			// if inline footnote, title == footnote contents
+			title = lr.title
+			noteId = lr.noteId
+		}
+
+		// rewind the whitespace
+		i = txtE + 1
+	}
+
+	// build content: img alt is escaped, link content is parsed
+	var content bytes.Buffer
+	if txtE > 1 {
+		if t == linkImg {
+			content.Write(data[1:txtE])
+		} else {
+			// links cannot contain other links, so turn off link parsing temporarily
+			insideLink := p.insideLink
+			p.insideLink = true
+			p.inline(&content, data[1:txtE])
+			p.insideLink = insideLink
+		}
+	}
+
+	var uLink []byte
+	if t == linkNormal || t == linkImg {
+		if len(link) > 0 {
+			var uLinkBuf bytes.Buffer
+			unescapeText(&uLinkBuf, link)
+			uLink = uLinkBuf.Bytes()
+		}
+
+		// links need something to click on and somewhere to go
+		if len(uLink) == 0 || (t == linkNormal && content.Len() == 0) {
+			return 0
+		}
+	}
+
+	// call the relevant rendering function
+	switch t {
+	case linkNormal:
+		if len(altContent) > 0 {
+			p.r.Link(out, uLink, title, altContent)
+		} else {
+			p.r.Link(out, uLink, title, content.Bytes())
+		}
+
+	case linkImg:
+		outSize := out.Len()
+		outBytes := out.Bytes()
+		if outSize > 0 && outBytes[outSize-1] == '!' {
+			out.Truncate(outSize - 1)
+		}
+
+		p.r.Image(out, uLink, title, content.Bytes())
+
+	case linkInlineFootnote:
+		outSize := out.Len()
+		outBytes := out.Bytes()
+		if outSize > 0 && outBytes[outSize-1] == '^' {
+			out.Truncate(outSize - 1)
+		}
+
+		p.r.FootnoteRef(out, link, noteId)
+
+	case linkDeferredFootnote:
+		p.r.FootnoteRef(out, link, noteId)
+
+	default:
+		return 0
+	}
+
+	return i
+}
+
+func (p *parser) inlineHTMLComment(out *bytes.Buffer, data []byte) int {
+	if len(data) < 5 {
+		return 0
+	}
+	if data[0] != '<' || data[1] != '!' || data[2] != '-' || data[3] != '-' {
+		return 0
+	}
+	i := 5
+	// scan for an end-of-comment marker, across lines if necessary
+	for i < len(data) && !(data[i-2] == '-' && data[i-1] == '-' && data[i] == '>') {
+		i++
+	}
+	// no end-of-comment marker
+	if i >= len(data) {
+		return 0
+	}
+	return i + 1
+}
+
+// '<' when tags or autolinks are allowed
+func leftAngle(p *parser, out *bytes.Buffer, data []byte, offset int) int {
+	data = data[offset:]
+	altype := LINK_TYPE_NOT_AUTOLINK
+	end := tagLength(data, &altype)
+	if size := p.inlineHTMLComment(out, data); size > 0 {
+		end = size
+	}
+	if end > 2 {
+		if altype != LINK_TYPE_NOT_AUTOLINK {
+			var uLink bytes.Buffer
+			unescapeText(&uLink, data[1:end+1-2])
+			if uLink.Len() > 0 {
+				p.r.AutoLink(out, uLink.Bytes(), altype)
+			}
+		} else {
+			p.r.RawHtmlTag(out, data[:end])
+		}
+	}
+
+	return end
+}
+
+// '\\' backslash escape
+var escapeChars = []byte("\\`*_{}[]()#+-.!:|&<>~")
+
+func escape(p *parser, out *bytes.Buffer, data []byte, offset int) int {
+	data = data[offset:]
+
+	if len(data) > 1 {
+		if bytes.IndexByte(escapeChars, data[1]) < 0 {
+			return 0
+		}
+
+		p.r.NormalText(out, data[1:2])
+	}
+
+	return 2
+}
+
+func unescapeText(ob *bytes.Buffer, src []byte) {
+	i := 0
+	for i < len(src) {
+		org := i
+		for i < len(src) && src[i] != '\\' {
+			i++
+		}
+
+		if i > org {
+			ob.Write(src[org:i])
+		}
+
+		if i+1 >= len(src) {
+			break
+		}
+
+		ob.WriteByte(src[i+1])
+		i += 2
+	}
+}
+
+// '&' escaped when it doesn't belong to an entity
+// valid entities are assumed to be anything matching &#?[A-Za-z0-9]+;
+func entity(p *parser, out *bytes.Buffer, data []byte, offset int) int {
+	data = data[offset:]
+
+	end := 1
+
+	if end < len(data) && data[end] == '#' {
+		end++
+	}
+
+	for end < len(data) && isalnum(data[end]) {
+		end++
+	}
+
+	if end < len(data) && data[end] == ';' {
+		end++ // real entity
+	} else {
+		return 0 // lone '&'
+	}
+
+	p.r.Entity(out, data[:end])
+
+	return end
+}
+
+func linkEndsWithEntity(data []byte, linkEnd int) bool {
+	entityRanges := htmlEntity.FindAllIndex(data[:linkEnd], -1)
+	return entityRanges != nil && entityRanges[len(entityRanges)-1][1] == linkEnd
+}
+
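+// ':' when EXTENSION_AUTOLINK is enabled: detect a bare URL such as
+// http://example.com in the surrounding text and render it as an autolink.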
+func autoLink(p *parser, out *bytes.Buffer, data []byte, offset int) int {
+	// quick check to rule out most false hits on ':'
+	if p.insideLink || len(data) < offset+3 || data[offset+1] != '/' || data[offset+2] != '/' {
+		return 0
+	}
+
+	// Now a more expensive check to see if we're not inside an anchor element
+	anchorStart := offset
+	offsetFromAnchor := 0
+	for anchorStart > 0 && data[anchorStart] != '<' {
+		anchorStart--
+		offsetFromAnchor++
+	}
+
+	anchorStr := anchorRe.Find(data[anchorStart:])
+	if anchorStr != nil {
+		out.Write(anchorStr[offsetFromAnchor:])
+		return len(anchorStr) - offsetFromAnchor
+	}
+
+	// scan backward for a word boundary
+	rewind := 0
+	for offset-rewind > 0 && rewind <= 7 && isletter(data[offset-rewind-1]) {
+		rewind++
+	}
+	if rewind > 6 { // longest supported protocol is "mailto" which has 6 letters
+		return 0
+	}
+
+	origData := data
+	data = data[offset-rewind:]
+
+	if !isSafeLink(data) {
+		return 0
+	}
+
+	linkEnd := 0
+	for linkEnd < len(data) && !isEndOfLink(data[linkEnd]) {
+		linkEnd++
+	}
+
+	// Skip punctuation at the end of the link
+	if (data[linkEnd-1] == '.' || data[linkEnd-1] == ',') && data[linkEnd-2] != '\\' {
+		linkEnd--
+	}
+
+	// But don't skip semicolon if it's a part of escaped entity:
+	if data[linkEnd-1] == ';' && data[linkEnd-2] != '\\' && !linkEndsWithEntity(data, linkEnd) {
+		linkEnd--
+	}
+
+	// See if the link finishes with a punctuation sign that can be closed.
+	var copen byte
+	switch data[linkEnd-1] {
+	case '"':
+		copen = '"'
+	case '\'':
+		copen = '\''
+	case ')':
+		copen = '('
+	case ']':
+		copen = '['
+	case '}':
+		copen = '{'
+	default:
+		copen = 0
+	}
+
+	if copen != 0 {
+		bufEnd := offset - rewind + linkEnd - 2
+
+		openDelim := 1
+
+		/* Try to close the final punctuation sign in this same line;
+		 * if we managed to close it outside of the URL, that means that it's
+		 * not part of the URL. If it closes inside the URL, that means it
+		 * is part of the URL.
+		 *
+		 * Examples:
+		 *
+		 *      foo http://www.pokemon.com/Pikachu_(Electric) bar
+		 *              => http://www.pokemon.com/Pikachu_(Electric)
+		 *
+		 *      foo (http://www.pokemon.com/Pikachu_(Electric)) bar
+		 *              => http://www.pokemon.com/Pikachu_(Electric)
+		 *
+		 *      foo http://www.pokemon.com/Pikachu_(Electric)) bar
+		 *              => http://www.pokemon.com/Pikachu_(Electric))
+		 *
+		 *      (foo http://www.pokemon.com/Pikachu_(Electric)) bar
+		 *              => foo http://www.pokemon.com/Pikachu_(Electric)
+		 */
+
+		for bufEnd >= 0 && origData[bufEnd] != '\n' && openDelim != 0 {
+			if origData[bufEnd] == data[linkEnd-1] {
+				openDelim++
+			}
+
+			if origData[bufEnd] == copen {
+				openDelim--
+			}
+
+			bufEnd--
+		}
+
+		if openDelim == 0 {
+			linkEnd--
+		}
+	}
+
+	// we were triggered on the ':', so we need to rewind the output a bit
+	if out.Len() >= rewind {
+		out.Truncate(len(out.Bytes()) - rewind)
+	}
+
+	var uLink bytes.Buffer
+	unescapeText(&uLink, data[:linkEnd])
+
+	if uLink.Len() > 0 {
+		p.r.AutoLink(out, uLink.Bytes(), LINK_TYPE_NORMAL)
+	}
+
+	return linkEnd - rewind
+}
+
+func isEndOfLink(char byte) bool {
+	return isspace(char) || char == '<'
+}
+
+var validUris = [][]byte{[]byte("http://"), []byte("https://"), []byte("ftp://"), []byte("mailto://")}
+var validPaths = [][]byte{[]byte("/"), []byte("./"), []byte("../")}
+
+func isSafeLink(link []byte) bool {
+	for _, path := range validPaths {
+		if len(link) >= len(path) && bytes.Equal(link[:len(path)], path) {
+			if len(link) == len(path) {
+				return true
+			} else if isalnum(link[len(path)]) {
+				return true
+			}
+		}
+	}
+
+	for _, prefix := range validUris {
+		// TODO: handle unicode here
+		// case-insensitive prefix test
+		if len(link) > len(prefix) && bytes.Equal(bytes.ToLower(link[:len(prefix)]), prefix) && isalnum(link[len(prefix)]) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// return the length of the given tag, or 0 if it's not valid
+func tagLength(data []byte, autolink *int) int {
+	var i, j int
+
+	// a valid tag can't be shorter than 3 chars
+	if len(data) < 3 {
+		return 0
+	}
+
+	// begins with a '<' optionally followed by '/', followed by letter or number
+	if data[0] != '<' {
+		return 0
+	}
+	if data[1] == '/' {
+		i = 2
+	} else {
+		i = 1
+	}
+
+	if !isalnum(data[i]) {
+		return 0
+	}
+
+	// scheme test
+	*autolink = LINK_TYPE_NOT_AUTOLINK
+
+	// try to find the beginning of a URI
+	for i < len(data) && (isalnum(data[i]) || data[i] == '.' || data[i] == '+' || data[i] == '-') {
+		i++
+	}
+
+	if i > 1 && i < len(data) && data[i] == '@' {
+		if j = isMailtoAutoLink(data[i:]); j != 0 {
+			*autolink = LINK_TYPE_EMAIL
+			return i + j
+		}
+	}
+
+	if i > 2 && i < len(data) && data[i] == ':' {
+		*autolink = LINK_TYPE_NORMAL
+		i++
+	}
+
+	// complete autolink test: no whitespace or ' or "
+	switch {
+	case i >= len(data):
+		*autolink = LINK_TYPE_NOT_AUTOLINK
+	case *autolink != 0:
+		j = i
+
+		for i < len(data) {
+			if data[i] == '\\' {
+				i += 2
+			} else if data[i] == '>' || data[i] == '\'' || data[i] == '"' || isspace(data[i]) {
+				break
+			} else {
+				i++
+			}
+
+		}
+
+		if i >= len(data) {
+			return 0
+		}
+		if i > j && data[i] == '>' {
+			return i + 1
+		}
+
+		// one of the forbidden chars has been found
+		*autolink = LINK_TYPE_NOT_AUTOLINK
+	}
+
+	// look for something looking like a tag end
+	for i < len(data) && data[i] != '>' {
+		i++
+	}
+	if i >= len(data) {
+		return 0
+	}
+	return i + 1
+}
+
+// look for the address part of a mail autolink and '>'
+// this is less strict than the original markdown e-mail address matching
+func isMailtoAutoLink(data []byte) int {
+	nb := 0
+
+	// address is assumed to be: [-@._a-zA-Z0-9]+ with exactly one '@'
+	for i := 0; i < len(data); i++ {
+		if isalnum(data[i]) {
+			continue
+		}
+
+		switch data[i] {
+		case '@':
+			nb++
+
+		case '-', '.', '_':
+			// Do nothing.
+
+		case '>':
+			if nb == 1 {
+				return i + 1
+			} else {
+				return 0
+			}
+		default:
+			return 0
+		}
+	}
+
+	return 0
+}
+
+// look for the next emph char, skipping other constructs
+func helperFindEmphChar(data []byte, c byte) int {
+	i := 0
+
+	for i < len(data) {
+		for i < len(data) && data[i] != c && data[i] != '`' && data[i] != '[' {
+			i++
+		}
+		if i >= len(data) {
+			return 0
+		}
+		// do not count escaped chars
+		if i != 0 && data[i-1] == '\\' {
+			i++
+			continue
+		}
+		if data[i] == c {
+			return i
+		}
+
+		if data[i] == '`' {
+			// skip a code span
+			tmpI := 0
+			i++
+			for i < len(data) && data[i] != '`' {
+				if tmpI == 0 && data[i] == c {
+					tmpI = i
+				}
+				i++
+			}
+			if i >= len(data) {
+				return tmpI
+			}
+			i++
+		} else if data[i] == '[' {
+			// skip a link
+			tmpI := 0
+			i++
+			for i < len(data) && data[i] != ']' {
+				if tmpI == 0 && data[i] == c {
+					tmpI = i
+				}
+				i++
+			}
+			i++
+			for i < len(data) && (data[i] == ' ' || data[i] == '\n') {
+				i++
+			}
+			if i >= len(data) {
+				return tmpI
+			}
+			if data[i] != '[' && data[i] != '(' { // not a link
+				if tmpI > 0 {
+					return tmpI
+				} else {
+					continue
+				}
+			}
+			cc := data[i]
+			i++
+			for i < len(data) && data[i] != cc {
+				if tmpI == 0 && data[i] == c {
+					return i
+				}
+				i++
+			}
+			if i >= len(data) {
+				return tmpI
+			}
+			i++
+		}
+	}
+	return 0
+}
+
+func helperEmphasis(p *parser, out *bytes.Buffer, data []byte, c byte) int {
+	i := 0
+
+	// skip one symbol if coming from emph3
+	if len(data) > 1 && data[0] == c && data[1] == c {
+		i = 1
+	}
+
+	for i < len(data) {
+		length := helperFindEmphChar(data[i:], c)
+		if length == 0 {
+			return 0
+		}
+		i += length
+		if i >= len(data) {
+			return 0
+		}
+
+		if i+1 < len(data) && data[i+1] == c {
+			i++
+			continue
+		}
+
+		if data[i] == c && !isspace(data[i-1]) {
+
+			if p.flags&EXTENSION_NO_INTRA_EMPHASIS != 0 {
+				if !(i+1 == len(data) || isspace(data[i+1]) || ispunct(data[i+1])) {
+					continue
+				}
+			}
+
+			var work bytes.Buffer
+			p.inline(&work, data[:i])
+			p.r.Emphasis(out, work.Bytes())
+			return i + 1
+		}
+	}
+
+	return 0
+}
+
+func helperDoubleEmphasis(p *parser, out *bytes.Buffer, data []byte, c byte) int {
+	i := 0
+
+	for i < len(data) {
+		length := helperFindEmphChar(data[i:], c)
+		if length == 0 {
+			return 0
+		}
+		i += length
+
+		if i+1 < len(data) && data[i] == c && data[i+1] == c && i > 0 && !isspace(data[i-1]) {
+			var work bytes.Buffer
+			p.inline(&work, data[:i])
+
+			if work.Len() > 0 {
+				// pick the right renderer
+				if c == '~' {
+					p.r.StrikeThrough(out, work.Bytes())
+				} else {
+					p.r.DoubleEmphasis(out, work.Bytes())
+				}
+			}
+			return i + 2
+		}
+		i++
+	}
+	return 0
+}
+
+func helperTripleEmphasis(p *parser, out *bytes.Buffer, data []byte, offset int, c byte) int {
+	i := 0
+	origData := data
+	data = data[offset:]
+
+	for i < len(data) {
+		length := helperFindEmphChar(data[i:], c)
+		if length == 0 {
+			return 0
+		}
+		i += length
+
+		// skip symbols preceded by whitespace
+		if data[i] != c || isspace(data[i-1]) {
+			continue
+		}
+
+		switch {
+		case i+2 < len(data) && data[i+1] == c && data[i+2] == c:
+			// triple symbol found
+			var work bytes.Buffer
+
+			p.inline(&work, data[:i])
+			if work.Len() > 0 {
+				p.r.TripleEmphasis(out, work.Bytes())
+			}
+			return i + 3
+		case (i+1 < len(data) && data[i+1] == c):
+			// double symbol found, hand over to emph1
+			length = helperEmphasis(p, out, origData[offset-2:], c)
+			if length == 0 {
+				return 0
+			} else {
+				return length - 2
+			}
+		default:
+			// single symbol found, hand over to emph2
+			length = helperDoubleEmphasis(p, out, origData[offset-1:], c)
+			if length == 0 {
+				return 0
+			} else {
+				return length - 1
+			}
+		}
+	}
+	return 0
+}
diff --git a/vendor/github.com/russross/blackfriday/latex.go b/vendor/github.com/russross/blackfriday/latex.go
new file mode 100644
index 00000000..3d30d094
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/latex.go
@@ -0,0 +1,334 @@
+//
+// Blackfriday Markdown Processor
+// Available at http://github.com/russross/blackfriday
+//
+// Copyright © 2011 Russ Ross <russ@russross.com>.
+// Distributed under the Simplified BSD License.
+// See README.md for details.
+//
+
+//
+//
+// LaTeX rendering backend
+//
+//
+
+package blackfriday
+
+import (
+	"bytes"
+	"strings"
+)
+
+// Latex is a type that implements the Renderer interface for LaTeX output.
+//
+// Do not create this directly; instead, use the LatexRenderer function.
+type Latex struct {
+}
+
+// LatexRenderer creates and configures a Latex object, which
+// satisfies the Renderer interface.
+//
+// flags is a set of LATEX_* options ORed together (currently no such options
+// are defined).
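+//
+// A minimal usage sketch (illustrative only; "input" stands for the caller's
+// markdown source):
+//
+//	output := Markdown(input, LatexRenderer(0), 0)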
+func LatexRenderer(flags int) Renderer {
+	return &Latex{}
+}
+
+func (options *Latex) GetFlags() int {
+	return 0
+}
+
+// render code chunks using verbatim, or listings if we have a language
+func (options *Latex) BlockCode(out *bytes.Buffer, text []byte, info string) {
+	if info == "" {
+		out.WriteString("\n\\begin{verbatim}\n")
+	} else {
+		lang := strings.Fields(info)[0]
+		out.WriteString("\n\\begin{lstlisting}[language=")
+		out.WriteString(lang)
+		out.WriteString("]\n")
+	}
+	out.Write(text)
+	if info == "" {
+		out.WriteString("\n\\end{verbatim}\n")
+	} else {
+		out.WriteString("\n\\end{lstlisting}\n")
+	}
+}
+
+func (options *Latex) TitleBlock(out *bytes.Buffer, text []byte) {
+
+}
+
+func (options *Latex) BlockQuote(out *bytes.Buffer, text []byte) {
+	out.WriteString("\n\\begin{quotation}\n")
+	out.Write(text)
+	out.WriteString("\n\\end{quotation}\n")
+}
+
+func (options *Latex) BlockHtml(out *bytes.Buffer, text []byte) {
+	// a pretty lame thing to do...
+	out.WriteString("\n\\begin{verbatim}\n")
+	out.Write(text)
+	out.WriteString("\n\\end{verbatim}\n")
+}
+
+func (options *Latex) Header(out *bytes.Buffer, text func() bool, level int, id string) {
+	marker := out.Len()
+
+	switch level {
+	case 1:
+		out.WriteString("\n\\section{")
+	case 2:
+		out.WriteString("\n\\subsection{")
+	case 3:
+		out.WriteString("\n\\subsubsection{")
+	case 4:
+		out.WriteString("\n\\paragraph{")
+	case 5:
+		out.WriteString("\n\\subparagraph{")
+	case 6:
+		out.WriteString("\n\\textbf{")
+	}
+	if !text() {
+		out.Truncate(marker)
+		return
+	}
+	out.WriteString("}\n")
+}
+
+func (options *Latex) HRule(out *bytes.Buffer) {
+	out.WriteString("\n\\HRule\n")
+}
+
+func (options *Latex) List(out *bytes.Buffer, text func() bool, flags int) {
+	marker := out.Len()
+	if flags&LIST_TYPE_ORDERED != 0 {
+		out.WriteString("\n\\begin{enumerate}\n")
+	} else {
+		out.WriteString("\n\\begin{itemize}\n")
+	}
+	if !text() {
+		out.Truncate(marker)
+		return
+	}
+	if flags&LIST_TYPE_ORDERED != 0 {
+		out.WriteString("\n\\end{enumerate}\n")
+	} else {
+		out.WriteString("\n\\end{itemize}\n")
+	}
+}
+
+func (options *Latex) ListItem(out *bytes.Buffer, text []byte, flags int) {
+	out.WriteString("\n\\item ")
+	out.Write(text)
+}
+
+func (options *Latex) Paragraph(out *bytes.Buffer, text func() bool) {
+	marker := out.Len()
+	out.WriteString("\n")
+	if !text() {
+		out.Truncate(marker)
+		return
+	}
+	out.WriteString("\n")
+}
+
+func (options *Latex) Table(out *bytes.Buffer, header []byte, body []byte, columnData []int) {
+	out.WriteString("\n\\begin{tabular}{")
+	for _, elt := range columnData {
+		switch elt {
+		case TABLE_ALIGNMENT_LEFT:
+			out.WriteByte('l')
+		case TABLE_ALIGNMENT_RIGHT:
+			out.WriteByte('r')
+		default:
+			out.WriteByte('c')
+		}
+	}
+	out.WriteString("}\n")
+	out.Write(header)
+	out.WriteString(" \\\\\n\\hline\n")
+	out.Write(body)
+	out.WriteString("\n\\end{tabular}\n")
+}
+
+func (options *Latex) TableRow(out *bytes.Buffer, text []byte) {
+	if out.Len() > 0 {
+		out.WriteString(" \\\\\n")
+	}
+	out.Write(text)
+}
+
+func (options *Latex) TableHeaderCell(out *bytes.Buffer, text []byte, align int) {
+	if out.Len() > 0 {
+		out.WriteString(" & ")
+	}
+	out.Write(text)
+}
+
+func (options *Latex) TableCell(out *bytes.Buffer, text []byte, align int) {
+	if out.Len() > 0 {
+		out.WriteString(" & ")
+	}
+	out.Write(text)
+}
+
+// TODO: this
+func (options *Latex) Footnotes(out *bytes.Buffer, text func() bool) {
+
+}
+
+func (options *Latex) FootnoteItem(out *bytes.Buffer, name, text []byte, flags int) {
+
+}
+
+func (options *Latex) AutoLink(out *bytes.Buffer, link []byte, kind int) {
+	out.WriteString("\\href{")
+	if kind == LINK_TYPE_EMAIL {
+		out.WriteString("mailto:")
+	}
+	out.Write(link)
+	out.WriteString("}{")
+	out.Write(link)
+	out.WriteString("}")
+}
+
+func (options *Latex) CodeSpan(out *bytes.Buffer, text []byte) {
+	out.WriteString("\\texttt{")
+	escapeSpecialChars(out, text)
+	out.WriteString("}")
+}
+
+func (options *Latex) DoubleEmphasis(out *bytes.Buffer, text []byte) {
+	out.WriteString("\\textbf{")
+	out.Write(text)
+	out.WriteString("}")
+}
+
+func (options *Latex) Emphasis(out *bytes.Buffer, text []byte) {
+	out.WriteString("\\textit{")
+	out.Write(text)
+	out.WriteString("}")
+}
+
+func (options *Latex) Image(out *bytes.Buffer, link []byte, title []byte, alt []byte) {
+	if bytes.HasPrefix(link, []byte("http://")) || bytes.HasPrefix(link, []byte("https://")) {
+		// treat it like a link
+		out.WriteString("\\href{")
+		out.Write(link)
+		out.WriteString("}{")
+		out.Write(alt)
+		out.WriteString("}")
+	} else {
+		out.WriteString("\\includegraphics{")
+		out.Write(link)
+		out.WriteString("}")
+	}
+}
+
+func (options *Latex) LineBreak(out *bytes.Buffer) {
+	out.WriteString(" \\\\\n")
+}
+
+func (options *Latex) Link(out *bytes.Buffer, link []byte, title []byte, content []byte) {
+	out.WriteString("\\href{")
+	out.Write(link)
+	out.WriteString("}{")
+	out.Write(content)
+	out.WriteString("}")
+}
+
+func (options *Latex) RawHtmlTag(out *bytes.Buffer, tag []byte) {
+}
+
+func (options *Latex) TripleEmphasis(out *bytes.Buffer, text []byte) {
+	out.WriteString("\\textbf{\\textit{")
+	out.Write(text)
+	out.WriteString("}}")
+}
+
+func (options *Latex) StrikeThrough(out *bytes.Buffer, text []byte) {
+	out.WriteString("\\sout{")
+	out.Write(text)
+	out.WriteString("}")
+}
+
+// TODO: this
+func (options *Latex) FootnoteRef(out *bytes.Buffer, ref []byte, id int) {
+
+}
+
+func needsBackslash(c byte) bool {
+	for _, r := range []byte("_{}%$&\\~#") {
+		if c == r {
+			return true
+		}
+	}
+	return false
+}
+
+func escapeSpecialChars(out *bytes.Buffer, text []byte) {
+	for i := 0; i < len(text); i++ {
+		// directly copy normal characters
+		org := i
+
+		for i < len(text) && !needsBackslash(text[i]) {
+			i++
+		}
+		if i > org {
+			out.Write(text[org:i])
+		}
+
+		// escape a character
+		if i >= len(text) {
+			break
+		}
+		out.WriteByte('\\')
+		out.WriteByte(text[i])
+	}
+}
+
+func (options *Latex) Entity(out *bytes.Buffer, entity []byte) {
+	// TODO: convert this into a unicode character or something
+	out.Write(entity)
+}
+
+func (options *Latex) NormalText(out *bytes.Buffer, text []byte) {
+	escapeSpecialChars(out, text)
+}
+
+// header and footer
+func (options *Latex) DocumentHeader(out *bytes.Buffer) {
+	out.WriteString("\\documentclass{article}\n")
+	out.WriteString("\n")
+	out.WriteString("\\usepackage{graphicx}\n")
+	out.WriteString("\\usepackage{listings}\n")
+	out.WriteString("\\usepackage[margin=1in]{geometry}\n")
+	out.WriteString("\\usepackage[utf8]{inputenc}\n")
+	out.WriteString("\\usepackage{verbatim}\n")
+	out.WriteString("\\usepackage[normalem]{ulem}\n")
+	out.WriteString("\\usepackage{hyperref}\n")
+	out.WriteString("\n")
+	out.WriteString("\\hypersetup{colorlinks,%\n")
+	out.WriteString("  citecolor=black,%\n")
+	out.WriteString("  filecolor=black,%\n")
+	out.WriteString("  linkcolor=black,%\n")
+	out.WriteString("  urlcolor=black,%\n")
+	out.WriteString("  pdfstartview=FitH,%\n")
+	out.WriteString("  breaklinks=true,%\n")
+	out.WriteString("  pdfauthor={Blackfriday Markdown Processor v")
+	out.WriteString(VERSION)
+	out.WriteString("}}\n")
+	out.WriteString("\n")
+	out.WriteString("\\newcommand{\\HRule}{\\rule{\\linewidth}{0.5mm}}\n")
+	out.WriteString("\\addtolength{\\parskip}{0.5\\baselineskip}\n")
+	out.WriteString("\\parindent=0pt\n")
+	out.WriteString("\n")
+	out.WriteString("\\begin{document}\n")
+}
+
+func (options *Latex) DocumentFooter(out *bytes.Buffer) {
+	out.WriteString("\n\\end{document}\n")
+}
diff --git a/vendor/github.com/russross/blackfriday/markdown.go b/vendor/github.com/russross/blackfriday/markdown.go
new file mode 100644
index 00000000..41595d62
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/markdown.go
@@ -0,0 +1,941 @@
+//
+// Blackfriday Markdown Processor
+// Available at http://github.com/russross/blackfriday
+//
+// Copyright © 2011 Russ Ross <russ@russross.com>.
+// Distributed under the Simplified BSD License.
+// See README.md for details.
+//
+
+//
+//
+// Markdown parsing and processing
+//
+//
+
+package blackfriday
+
+import (
+	"bytes"
+	"fmt"
+	"strings"
+	"unicode/utf8"
+)
+
+const VERSION = "1.5"
+
+// These are the supported markdown parsing extensions.
+// OR these values together to select multiple extensions.
+const (
+	EXTENSION_NO_INTRA_EMPHASIS          = 1 << iota // ignore emphasis markers inside words
+	EXTENSION_TABLES                                 // render tables
+	EXTENSION_FENCED_CODE                            // render fenced code blocks
+	EXTENSION_AUTOLINK                               // detect embedded URLs that are not explicitly marked
+	EXTENSION_STRIKETHROUGH                          // strikethrough text using ~~test~~
+	EXTENSION_LAX_HTML_BLOCKS                        // loosen up HTML block parsing rules
+	EXTENSION_SPACE_HEADERS                          // be strict about prefix header rules
+	EXTENSION_HARD_LINE_BREAK                        // translate newlines into line breaks
+	EXTENSION_TAB_SIZE_EIGHT                         // expand tabs to eight spaces instead of four
+	EXTENSION_FOOTNOTES                              // Pandoc-style footnotes
+	EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK             // No need to insert an empty line to start a (code, quote, ordered list, unordered list) block
+	EXTENSION_HEADER_IDS                             // specify header IDs  with {#id}
+	EXTENSION_TITLEBLOCK                             // Titleblock ala pandoc
+	EXTENSION_AUTO_HEADER_IDS                        // Create the header ID from the text
+	EXTENSION_BACKSLASH_LINE_BREAK                   // translate trailing backslashes into line breaks
+	EXTENSION_DEFINITION_LISTS                       // render definition lists
+	EXTENSION_JOIN_LINES                             // delete newline and join lines
+
+	commonHtmlFlags = 0 |
+		HTML_USE_XHTML |
+		HTML_USE_SMARTYPANTS |
+		HTML_SMARTYPANTS_FRACTIONS |
+		HTML_SMARTYPANTS_DASHES |
+		HTML_SMARTYPANTS_LATEX_DASHES
+
+	commonExtensions = 0 |
+		EXTENSION_NO_INTRA_EMPHASIS |
+		EXTENSION_TABLES |
+		EXTENSION_FENCED_CODE |
+		EXTENSION_AUTOLINK |
+		EXTENSION_STRIKETHROUGH |
+		EXTENSION_SPACE_HEADERS |
+		EXTENSION_HEADER_IDS |
+		EXTENSION_BACKSLASH_LINE_BREAK |
+		EXTENSION_DEFINITION_LISTS
+)
+
+// These are the possible flag values for the link renderer.
+// Only a single one of these values will be used; they are not ORed together.
+// These are mostly of interest if you are writing a new output format.
+const (
+	LINK_TYPE_NOT_AUTOLINK = iota
+	LINK_TYPE_NORMAL
+	LINK_TYPE_EMAIL
+)
+
+// These are the possible flag values for the ListItem renderer.
+// Multiple flag values may be ORed together.
+// These are mostly of interest if you are writing a new output format.
+const (
+	LIST_TYPE_ORDERED = 1 << iota
+	LIST_TYPE_DEFINITION
+	LIST_TYPE_TERM
+	LIST_ITEM_CONTAINS_BLOCK
+	LIST_ITEM_BEGINNING_OF_LIST
+	LIST_ITEM_END_OF_LIST
+)
+
+// These are the possible flag values for the table cell renderer.
+// Only a single one of these values will be used; they are not ORed together.
+// These are mostly of interest if you are writing a new output format.
+const (
+	TABLE_ALIGNMENT_LEFT = 1 << iota
+	TABLE_ALIGNMENT_RIGHT
+	TABLE_ALIGNMENT_CENTER = (TABLE_ALIGNMENT_LEFT | TABLE_ALIGNMENT_RIGHT)
+)
+
+// The size of a tab stop.
+const (
+	TAB_SIZE_DEFAULT = 4
+	TAB_SIZE_EIGHT   = 8
+)
+
+// blockTags is a set of tags that are recognized as HTML block tags.
+// Any of these can be included in markdown text without special escaping.
+var blockTags = map[string]struct{}{
+	"blockquote": {},
+	"del":        {},
+	"div":        {},
+	"dl":         {},
+	"fieldset":   {},
+	"form":       {},
+	"h1":         {},
+	"h2":         {},
+	"h3":         {},
+	"h4":         {},
+	"h5":         {},
+	"h6":         {},
+	"iframe":     {},
+	"ins":        {},
+	"math":       {},
+	"noscript":   {},
+	"ol":         {},
+	"pre":        {},
+	"p":          {},
+	"script":     {},
+	"style":      {},
+	"table":      {},
+	"ul":         {},
+
+	// HTML5
+	"address":    {},
+	"article":    {},
+	"aside":      {},
+	"canvas":     {},
+	"figcaption": {},
+	"figure":     {},
+	"footer":     {},
+	"header":     {},
+	"hgroup":     {},
+	"main":       {},
+	"nav":        {},
+	"output":     {},
+	"progress":   {},
+	"section":    {},
+	"video":      {},
+}
+
+// Renderer is the rendering interface.
+// This is mostly of interest if you are implementing a new rendering format.
+//
+// When a byte slice is provided, it contains the (rendered) contents of the
+// element.
+//
+// When a callback is provided instead, it will write the contents of the
+// respective element directly to the output buffer and return true on success.
+// If the callback returns false, the rendering function should reset the
+// output buffer as though it had never been called.
+//
+// Currently Html and Latex implementations are provided
+type Renderer interface {
+	// block-level callbacks
+	BlockCode(out *bytes.Buffer, text []byte, infoString string)
+	BlockQuote(out *bytes.Buffer, text []byte)
+	BlockHtml(out *bytes.Buffer, text []byte)
+	Header(out *bytes.Buffer, text func() bool, level int, id string)
+	HRule(out *bytes.Buffer)
+	List(out *bytes.Buffer, text func() bool, flags int)
+	ListItem(out *bytes.Buffer, text []byte, flags int)
+	Paragraph(out *bytes.Buffer, text func() bool)
+	Table(out *bytes.Buffer, header []byte, body []byte, columnData []int)
+	TableRow(out *bytes.Buffer, text []byte)
+	TableHeaderCell(out *bytes.Buffer, text []byte, flags int)
+	TableCell(out *bytes.Buffer, text []byte, flags int)
+	Footnotes(out *bytes.Buffer, text func() bool)
+	FootnoteItem(out *bytes.Buffer, name, text []byte, flags int)
+	TitleBlock(out *bytes.Buffer, text []byte)
+
+	// Span-level callbacks
+	AutoLink(out *bytes.Buffer, link []byte, kind int)
+	CodeSpan(out *bytes.Buffer, text []byte)
+	DoubleEmphasis(out *bytes.Buffer, text []byte)
+	Emphasis(out *bytes.Buffer, text []byte)
+	Image(out *bytes.Buffer, link []byte, title []byte, alt []byte)
+	LineBreak(out *bytes.Buffer)
+	Link(out *bytes.Buffer, link []byte, title []byte, content []byte)
+	RawHtmlTag(out *bytes.Buffer, tag []byte)
+	TripleEmphasis(out *bytes.Buffer, text []byte)
+	StrikeThrough(out *bytes.Buffer, text []byte)
+	FootnoteRef(out *bytes.Buffer, ref []byte, id int)
+
+	// Low-level callbacks
+	Entity(out *bytes.Buffer, entity []byte)
+	NormalText(out *bytes.Buffer, text []byte)
+
+	// Header and footer
+	DocumentHeader(out *bytes.Buffer)
+	DocumentFooter(out *bytes.Buffer)
+
+	GetFlags() int
+}
+
+// Callback functions for inline parsing. One such function is defined
+// for each character that triggers a response when parsing inline data.
+type inlineParser func(p *parser, out *bytes.Buffer, data []byte, offset int) int
+
+// Parser holds runtime state used by the parser.
+// This is constructed by the Markdown function.
+type parser struct {
+	r              Renderer
+	refOverride    ReferenceOverrideFunc
+	refs           map[string]*reference
+	inlineCallback [256]inlineParser
+	flags          int
+	nesting        int
+	maxNesting     int
+	insideLink     bool
+
+	// Footnotes need to be ordered as well as available to quickly check for
+	// presence. If a ref is also a footnote, it's stored both in refs and here
+	// in notes. Slice is nil if footnotes not enabled.
+	notes       []*reference
+	notesRecord map[string]struct{}
+}
+
+func (p *parser) getRef(refid string) (ref *reference, found bool) {
+	if p.refOverride != nil {
+		r, overridden := p.refOverride(refid)
+		if overridden {
+			if r == nil {
+				return nil, false
+			}
+			return &reference{
+				link:     []byte(r.Link),
+				title:    []byte(r.Title),
+				noteId:   0,
+				hasBlock: false,
+				text:     []byte(r.Text)}, true
+		}
+	}
+	// refs are case insensitive
+	ref, found = p.refs[strings.ToLower(refid)]
+	return ref, found
+}
+
+func (p *parser) isFootnote(ref *reference) bool {
+	_, ok := p.notesRecord[string(ref.link)]
+	return ok
+}
+
+//
+//
+// Public interface
+//
+//
+
+// Reference represents the details of a link.
+// See the documentation in Options for more details on use-case.
+type Reference struct {
+	// Link is usually the URL the reference points to.
+	Link string
+	// Title is the alternate text describing the link in more detail.
+	Title string
+	// Text is the optional text to override the ref with if the syntax used was
+	// [refid][]
+	Text string
+}
+
+// ReferenceOverrideFunc is expected to be called with a reference string and
+// return either a valid Reference type that the reference string maps to or
+// nil. If overridden is false, the default reference logic will be executed.
+// See the documentation in Options for more details on use-case.
+type ReferenceOverrideFunc func(reference string) (ref *Reference, overridden bool)
+
+// Options represents configurable overrides and callbacks (in addition to the
+// extension flag set) for configuring a Markdown parse.
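+//
+// As an illustrative sketch (the refid "home" and the URL are assumptions,
+// not part of the package), a ReferenceOverride callback can resolve one
+// refid and defer all others to the refids defined in the document:
+//
+//	input := []byte("see the [project site][home]")
+//	opts := Options{
+//		Extensions: commonExtensions,
+//		ReferenceOverride: func(refid string) (*Reference, bool) {
+//			if refid == "home" {
+//				return &Reference{Link: "https://example.com/", Title: "Example"}, true
+//			}
+//			return nil, false
+//		},
+//	}
+//	html := MarkdownOptions(input, HtmlRenderer(commonHtmlFlags, "", ""), opts)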
+type Options struct {
+	// Extensions is a flag set of bit-wise ORed extension bits. See the
+	// EXTENSION_* flags defined in this package.
+	Extensions int
+
+	// ReferenceOverride is an optional function callback that is called every
+	// time a reference is resolved.
+	//
+	// In Markdown, the link reference syntax can be made to resolve a link to
+	// a reference instead of an inline URL, in one of the following ways:
+	//
+	//  * [link text][refid]
+	//  * [refid][]
+	//
+	// Usually, the refid is defined at the bottom of the Markdown document. If
+	// this override function is provided, the refid is passed to the override
+	// function first, before consulting the defined refids at the bottom. If
+	// the override function indicates an override did not occur, the refids at
+	// the bottom will be used to fill in the link details.
+	ReferenceOverride ReferenceOverrideFunc
+}
+
+// MarkdownBasic is a convenience function for simple rendering.
+// It processes markdown input with no extensions enabled.
+func MarkdownBasic(input []byte) []byte {
+	// set up the HTML renderer
+	htmlFlags := HTML_USE_XHTML
+	renderer := HtmlRenderer(htmlFlags, "", "")
+
+	// set up the parser
+	return MarkdownOptions(input, renderer, Options{Extensions: 0})
+}
+
+// MarkdownCommon is a convenience function for simple rendering.
+// It processes markdown input with the most useful extensions enabled,
+// including:
+//
+// * Smartypants processing with smart fractions and LaTeX dashes
+//
+// * Intra-word emphasis suppression
+//
+// * Tables
+//
+// * Fenced code blocks
+//
+// * Autolinking
+//
+// * Strikethrough support
+//
+// * Strict header parsing
+//
+// * Custom Header IDs
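+//
+// For example (an illustrative sketch):
+//
+//	html := MarkdownCommon([]byte("Hello, *world*!"))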
+func MarkdownCommon(input []byte) []byte {
+	// set up the HTML renderer
+	renderer := HtmlRenderer(commonHtmlFlags, "", "")
+	return MarkdownOptions(input, renderer, Options{
+		Extensions: commonExtensions})
+}
+
+// Markdown is the main rendering function.
+// It parses and renders a block of markdown-encoded text.
+// The supplied Renderer is used to format the output, and extensions dictates
+// which non-standard extensions are enabled.
+//
+// To use the supplied Html or LaTeX renderers, see HtmlRenderer and
+// LatexRenderer, respectively.
+func Markdown(input []byte, renderer Renderer, extensions int) []byte {
+	return MarkdownOptions(input, renderer, Options{
+		Extensions: extensions})
+}
+
+// MarkdownOptions is just like Markdown but takes additional options through
+// the Options struct.
+func MarkdownOptions(input []byte, renderer Renderer, opts Options) []byte {
+	// no point in parsing if we can't render
+	if renderer == nil {
+		return nil
+	}
+
+	extensions := opts.Extensions
+
+	// fill in the render structure
+	p := new(parser)
+	p.r = renderer
+	p.flags = extensions
+	p.refOverride = opts.ReferenceOverride
+	p.refs = make(map[string]*reference)
+	p.maxNesting = 16
+	p.insideLink = false
+
+	// register inline parsers
+	p.inlineCallback['*'] = emphasis
+	p.inlineCallback['_'] = emphasis
+	if extensions&EXTENSION_STRIKETHROUGH != 0 {
+		p.inlineCallback['~'] = emphasis
+	}
+	p.inlineCallback['`'] = codeSpan
+	p.inlineCallback['\n'] = lineBreak
+	p.inlineCallback['['] = link
+	p.inlineCallback['<'] = leftAngle
+	p.inlineCallback['\\'] = escape
+	p.inlineCallback['&'] = entity
+
+	if extensions&EXTENSION_AUTOLINK != 0 {
+		p.inlineCallback[':'] = autoLink
+	}
+
+	if extensions&EXTENSION_FOOTNOTES != 0 {
+		p.notes = make([]*reference, 0)
+		p.notesRecord = make(map[string]struct{})
+	}
+
+	first := firstPass(p, input)
+	second := secondPass(p, first)
+	return second
+}
+
+// first pass:
+// - normalize newlines
+// - extract references (outside of fenced code blocks)
+// - expand tabs (outside of fenced code blocks)
+// - copy everything else
+func firstPass(p *parser, input []byte) []byte {
+	var out bytes.Buffer
+	tabSize := TAB_SIZE_DEFAULT
+	if p.flags&EXTENSION_TAB_SIZE_EIGHT != 0 {
+		tabSize = TAB_SIZE_EIGHT
+	}
+	beg := 0
+	lastFencedCodeBlockEnd := 0
+	for beg < len(input) {
+		// Find end of this line, then process the line.
+		end := beg
+		for end < len(input) && input[end] != '\n' && input[end] != '\r' {
+			end++
+		}
+
+		if p.flags&EXTENSION_FENCED_CODE != 0 {
+			// track fenced code block boundaries to suppress tab expansion
+			// and reference extraction inside them:
+			if beg >= lastFencedCodeBlockEnd {
+				if i := p.fencedCodeBlock(&out, input[beg:], false); i > 0 {
+					lastFencedCodeBlockEnd = beg + i
+				}
+			}
+		}
+
+		// add the line body if present
+		if end > beg {
+			if end < lastFencedCodeBlockEnd { // Do not expand tabs while inside fenced code blocks.
+				out.Write(input[beg:end])
+			} else if refEnd := isReference(p, input[beg:], tabSize); refEnd > 0 {
+				beg += refEnd
+				continue
+			} else {
+				expandTabs(&out, input[beg:end], tabSize)
+			}
+		}
+
+		if end < len(input) && input[end] == '\r' {
+			end++
+		}
+		if end < len(input) && input[end] == '\n' {
+			end++
+		}
+		out.WriteByte('\n')
+
+		beg = end
+	}
+
+	// empty input?
+	if out.Len() == 0 {
+		out.WriteByte('\n')
+	}
+
+	return out.Bytes()
+}
+
+// second pass: actual rendering
+func secondPass(p *parser, input []byte) []byte {
+	var output bytes.Buffer
+
+	p.r.DocumentHeader(&output)
+	p.block(&output, input)
+
+	if p.flags&EXTENSION_FOOTNOTES != 0 && len(p.notes) > 0 {
+		p.r.Footnotes(&output, func() bool {
+			flags := LIST_ITEM_BEGINNING_OF_LIST
+			for i := 0; i < len(p.notes); i += 1 {
+				ref := p.notes[i]
+				var buf bytes.Buffer
+				if ref.hasBlock {
+					flags |= LIST_ITEM_CONTAINS_BLOCK
+					p.block(&buf, ref.title)
+				} else {
+					p.inline(&buf, ref.title)
+				}
+				p.r.FootnoteItem(&output, ref.link, buf.Bytes(), flags)
+				flags &^= LIST_ITEM_BEGINNING_OF_LIST | LIST_ITEM_CONTAINS_BLOCK
+			}
+
+			return true
+		})
+	}
+
+	p.r.DocumentFooter(&output)
+
+	if p.nesting != 0 {
+		panic("Nesting level did not end at zero")
+	}
+
+	return output.Bytes()
+}
+
+//
+// Link references
+//
+// This section implements support for references that (usually) appear
+// as footnotes in a document, and can be referenced anywhere in the document.
+// The basic format is:
+//
+//    [1]: http://www.google.com/ "Google"
+//    [2]: http://www.github.com/ "Github"
+//
+// Anywhere in the document, the reference can be linked by referring to its
+// label, i.e., 1 and 2 in this example, as in:
+//
+//    This library is hosted on [Github][2], a git hosting site.
+//
+// Actual footnotes as specified in Pandoc and supported by some other Markdown
+// libraries such as php-markdown are also taken care of. They look like this:
+//
+//    This sentence needs a bit of further explanation.[^note]
+//
+//    [^note]: This is the explanation.
+//
+// Footnotes should be placed at the end of the document in an ordered list.
+// Inline footnotes such as:
+//
+//    Inline footnotes^[Not supported.] also exist.
+//
+// are not yet supported.
+
+// References are parsed and stored in this struct.
+type reference struct {
+	link     []byte
+	title    []byte
+	noteId   int // 0 if not a footnote ref
+	hasBlock bool
+	text     []byte
+}
+
+func (r *reference) String() string {
+	return fmt.Sprintf("{link: %q, title: %q, text: %q, noteId: %d, hasBlock: %v}",
+		r.link, r.title, r.text, r.noteId, r.hasBlock)
+}
+
+// Check whether or not data starts with a reference link.
+// If so, it is parsed and stored in the list of references
+// (in the render struct).
+// Returns the number of bytes to skip to move past it,
+// or zero if the first line is not a reference.
+func isReference(p *parser, data []byte, tabSize int) int {
+	// up to 3 optional leading spaces
+	if len(data) < 4 {
+		return 0
+	}
+	i := 0
+	for i < 3 && data[i] == ' ' {
+		i++
+	}
+
+	noteId := 0
+
+	// id part: anything but a newline between brackets
+	if data[i] != '[' {
+		return 0
+	}
+	i++
+	if p.flags&EXTENSION_FOOTNOTES != 0 {
+		if i < len(data) && data[i] == '^' {
+			// we can set it to anything here because the proper noteIds will
+			// be assigned later during the second pass. It just has to be != 0
+			noteId = 1
+			i++
+		}
+	}
+	idOffset := i
+	for i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != ']' {
+		i++
+	}
+	if i >= len(data) || data[i] != ']' {
+		return 0
+	}
+	idEnd := i
+
+	// spacer: colon (space | tab)* newline? (space | tab)*
+	i++
+	if i >= len(data) || data[i] != ':' {
+		return 0
+	}
+	i++
+	for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
+		i++
+	}
+	if i < len(data) && (data[i] == '\n' || data[i] == '\r') {
+		i++
+		if i < len(data) && data[i] == '\n' && data[i-1] == '\r' {
+			i++
+		}
+	}
+	for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
+		i++
+	}
+	if i >= len(data) {
+		return 0
+	}
+
+	var (
+		linkOffset, linkEnd   int
+		titleOffset, titleEnd int
+		lineEnd               int
+		raw                   []byte
+		hasBlock              bool
+	)
+
+	if p.flags&EXTENSION_FOOTNOTES != 0 && noteId != 0 {
+		linkOffset, linkEnd, raw, hasBlock = scanFootnote(p, data, i, tabSize)
+		lineEnd = linkEnd
+	} else {
+		linkOffset, linkEnd, titleOffset, titleEnd, lineEnd = scanLinkRef(p, data, i)
+	}
+	if lineEnd == 0 {
+		return 0
+	}
+
+	// a valid ref has been found
+
+	ref := &reference{
+		noteId:   noteId,
+		hasBlock: hasBlock,
+	}
+
+	if noteId > 0 {
+		// reusing the link field for the id since footnotes don't have links
+		ref.link = data[idOffset:idEnd]
+		// if footnote, it's not really a title, it's the contained text
+		ref.title = raw
+	} else {
+		ref.link = data[linkOffset:linkEnd]
+		ref.title = data[titleOffset:titleEnd]
+	}
+
+	// id matches are case-insensitive
+	id := string(bytes.ToLower(data[idOffset:idEnd]))
+
+	p.refs[id] = ref
+
+	return lineEnd
+}
+
+func scanLinkRef(p *parser, data []byte, i int) (linkOffset, linkEnd, titleOffset, titleEnd, lineEnd int) {
+	// link: whitespace-free sequence, optionally between angle brackets
+	if data[i] == '<' {
+		i++
+	}
+	linkOffset = i
+	if i == len(data) {
+		return
+	}
+	for i < len(data) && data[i] != ' ' && data[i] != '\t' && data[i] != '\n' && data[i] != '\r' {
+		i++
+	}
+	linkEnd = i
+	if data[linkOffset] == '<' && data[linkEnd-1] == '>' {
+		linkOffset++
+		linkEnd--
+	}
+
+	// optional spacer: (space | tab)* (newline | '\'' | '"' | '(' )
+	for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
+		i++
+	}
+	if i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != '\'' && data[i] != '"' && data[i] != '(' {
+		return
+	}
+
+	// compute end-of-line
+	if i >= len(data) || data[i] == '\r' || data[i] == '\n' {
+		lineEnd = i
+	}
+	if i+1 < len(data) && data[i] == '\r' && data[i+1] == '\n' {
+		lineEnd++
+	}
+
+	// optional (space|tab)* spacer after a newline
+	if lineEnd > 0 {
+		i = lineEnd + 1
+		for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
+			i++
+		}
+	}
+
+	// optional title: any non-newline sequence enclosed in '"() alone on its line
+	if i+1 < len(data) && (data[i] == '\'' || data[i] == '"' || data[i] == '(') {
+		i++
+		titleOffset = i
+
+		// look for EOL
+		for i < len(data) && data[i] != '\n' && data[i] != '\r' {
+			i++
+		}
+		if i+1 < len(data) && data[i] == '\n' && data[i+1] == '\r' {
+			titleEnd = i + 1
+		} else {
+			titleEnd = i
+		}
+
+		// step back
+		i--
+		for i > titleOffset && (data[i] == ' ' || data[i] == '\t') {
+			i--
+		}
+		if i > titleOffset && (data[i] == '\'' || data[i] == '"' || data[i] == ')') {
+			lineEnd = titleEnd
+			titleEnd = i
+		}
+	}
+
+	return
+}
+
+// The first bit of this logic is the same as (*parser).listItem, but the rest
+// is much simpler. This function simply finds the entire block and shifts it
+// over by one tab if it is indeed a block (just returns the line if it's not).
+// blockEnd is the end of the section in the input buffer, and contents is the
+// extracted text that was shifted over one tab. It will need to be rendered at
+// the end of the document.
+func scanFootnote(p *parser, data []byte, i, indentSize int) (blockStart, blockEnd int, contents []byte, hasBlock bool) {
+	if i == 0 || len(data) == 0 {
+		return
+	}
+
+	// skip leading whitespace on first line
+	for i < len(data) && data[i] == ' ' {
+		i++
+	}
+
+	blockStart = i
+
+	// find the end of the line
+	blockEnd = i
+	for i < len(data) && data[i-1] != '\n' {
+		i++
+	}
+
+	// get working buffer
+	var raw bytes.Buffer
+
+	// put the first line into the working buffer
+	raw.Write(data[blockEnd:i])
+	blockEnd = i
+
+	// process the following lines
+	containsBlankLine := false
+
+gatherLines:
+	for blockEnd < len(data) {
+		i++
+
+		// find the end of this line
+		for i < len(data) && data[i-1] != '\n' {
+			i++
+		}
+
+		// if it is an empty line, guess that it is part of this item
+		// and move on to the next line
+		if p.isEmpty(data[blockEnd:i]) > 0 {
+			containsBlankLine = true
+			blockEnd = i
+			continue
+		}
+
+		n := 0
+		if n = isIndented(data[blockEnd:i], indentSize); n == 0 {
+			// this is the end of the block.
+			// we don't want to include this last line in the index.
+			break gatherLines
+		}
+
+		// if there were blank lines before this one, insert a new one now
+		if containsBlankLine {
+			raw.WriteByte('\n')
+			containsBlankLine = false
+		}
+
+		// get rid of that first tab, write to buffer
+		raw.Write(data[blockEnd+n : i])
+		hasBlock = true
+
+		blockEnd = i
+	}
+
+	if data[blockEnd-1] != '\n' {
+		raw.WriteByte('\n')
+	}
+
+	contents = raw.Bytes()
+
+	return
+}
+
+//
+//
+// Miscellaneous helper functions
+//
+//
+
+// Test if a character is a punctuation symbol.
+// Taken from a private function in regexp in the stdlib.
+func ispunct(c byte) bool {
+	for _, r := range []byte("!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~") {
+		if c == r {
+			return true
+		}
+	}
+	return false
+}
+
+// Test if a character is a whitespace character.
+func isspace(c byte) bool {
+	return ishorizontalspace(c) || isverticalspace(c)
+}
+
+// Test if a character is a horizontal whitespace character.
+func ishorizontalspace(c byte) bool {
+	return c == ' ' || c == '\t'
+}
+
+// Test if a character is a vertical whitespace character.
+func isverticalspace(c byte) bool {
+	return c == '\n' || c == '\r' || c == '\f' || c == '\v'
+}
+
+// Test if a character is a letter.
+func isletter(c byte) bool {
+	return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
+}
+
+// Test if a character is a letter or a digit.
+// TODO: check when this is looking for ASCII alnum and when it should use unicode
+func isalnum(c byte) bool {
+	return (c >= '0' && c <= '9') || isletter(c)
+}
+
+// Replace tab characters with spaces, aligning to the next TAB_SIZE column.
+// The trailing newline is added by the caller, not by this function.
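+// For example, with a tabSize of 4, the line "a\tbc\td" expands to
+// "a   bc  d" (columns advance per decoded rune, not per byte).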
+func expandTabs(out *bytes.Buffer, line []byte, tabSize int) {
+	// first, check for common cases: no tabs, or only tabs at beginning of line
+	i, prefix := 0, 0
+	slowcase := false
+	for i = 0; i < len(line); i++ {
+		if line[i] == '\t' {
+			if prefix == i {
+				prefix++
+			} else {
+				slowcase = true
+				break
+			}
+		}
+	}
+
+	// no need to decode runes if all tabs are at the beginning of the line
+	if !slowcase {
+		for i = 0; i < prefix*tabSize; i++ {
+			out.WriteByte(' ')
+		}
+		out.Write(line[prefix:])
+		return
+	}
+
+	// the slow case: we need to count runes to figure out how
+	// many spaces to insert for each tab
+	column := 0
+	i = 0
+	for i < len(line) {
+		start := i
+		for i < len(line) && line[i] != '\t' {
+			_, size := utf8.DecodeRune(line[i:])
+			i += size
+			column++
+		}
+
+		if i > start {
+			out.Write(line[start:i])
+		}
+
+		if i >= len(line) {
+			break
+		}
+
+		for {
+			out.WriteByte(' ')
+			column++
+			if column%tabSize == 0 {
+				break
+			}
+		}
+
+		i++
+	}
+}
+
+// Check whether a line counts as indented.
+// Returns the number of characters in the indent (0 = not indented).
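+// For example, isIndented([]byte("    code"), 4) returns 4, while a line that
+// starts with a single tab returns 1.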
+func isIndented(data []byte, indentSize int) int {
+	if len(data) == 0 {
+		return 0
+	}
+	if data[0] == '\t' {
+		return 1
+	}
+	if len(data) < indentSize {
+		return 0
+	}
+	for i := 0; i < indentSize; i++ {
+		if data[i] != ' ' {
+			return 0
+		}
+	}
+	return indentSize
+}
+
+// Create a url-safe slug for fragments
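+// For example, slugify([]byte("Hello, World!")) yields "Hello-World": runs of
+// non-alphanumeric bytes collapse to a single '-', and any leading or trailing
+// dashes are trimmed.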
+func slugify(in []byte) []byte {
+	if len(in) == 0 {
+		return in
+	}
+	out := make([]byte, 0, len(in))
+	sym := false
+
+	for _, ch := range in {
+		if isalnum(ch) {
+			sym = false
+			out = append(out, ch)
+		} else if sym {
+			continue
+		} else {
+			out = append(out, '-')
+			sym = true
+		}
+	}
+	var a, b int
+	var ch byte
+	for a, ch = range out {
+		if ch != '-' {
+			break
+		}
+	}
+	for b = len(out) - 1; b > 0; b-- {
+		if out[b] != '-' {
+			break
+		}
+	}
+	return out[a : b+1]
+}
diff --git a/vendor/github.com/russross/blackfriday/smartypants.go b/vendor/github.com/russross/blackfriday/smartypants.go
new file mode 100644
index 00000000..f25bd07d
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/smartypants.go
@@ -0,0 +1,430 @@
+//
+// Blackfriday Markdown Processor
+// Available at http://github.com/russross/blackfriday
+//
+// Copyright © 2011 Russ Ross <russ@russross.com>.
+// Distributed under the Simplified BSD License.
+// See README.md for details.
+//
+
+//
+//
+// SmartyPants rendering
+//
+//
+
+package blackfriday
+
+import (
+	"bytes"
+)
+
+type smartypantsData struct {
+	inSingleQuote bool
+	inDoubleQuote bool
+}
+
+func wordBoundary(c byte) bool {
+	return c == 0 || isspace(c) || ispunct(c)
+}
+
+func tolower(c byte) byte {
+	if c >= 'A' && c <= 'Z' {
+		return c - 'A' + 'a'
+	}
+	return c
+}
+
+func isdigit(c byte) bool {
+	return c >= '0' && c <= '9'
+}
+
+func smartQuoteHelper(out *bytes.Buffer, previousChar byte, nextChar byte, quote byte, isOpen *bool, addNBSP bool) bool {
+	// edge of the buffer is likely to be a tag that we don't get to see,
+	// so we treat it like text sometimes
+
+	// enumerate all sixteen possibilities for (previousChar, nextChar)
+	// each can be one of {0, space, punct, other}
+	switch {
+	case previousChar == 0 && nextChar == 0:
+		// context is not any help here, so toggle
+		*isOpen = !*isOpen
+	case isspace(previousChar) && nextChar == 0:
+		// [ "] might be [ "<code>foo...]
+		*isOpen = true
+	case ispunct(previousChar) && nextChar == 0:
+		// [!"] hmm... could be [Run!"] or [("<code>...]
+		*isOpen = false
+	case /* isnormal(previousChar) && */ nextChar == 0:
+		// [a"] is probably a close
+		*isOpen = false
+	case previousChar == 0 && isspace(nextChar):
+		// [" ] might be [...foo</code>" ]
+		*isOpen = false
+	case isspace(previousChar) && isspace(nextChar):
+		// [ " ] context is not any help here, so toggle
+		*isOpen = !*isOpen
+	case ispunct(previousChar) && isspace(nextChar):
+		// [!" ] is probably a close
+		*isOpen = false
+	case /* isnormal(previousChar) && */ isspace(nextChar):
+		// [a" ] this is one of the easy cases
+		*isOpen = false
+	case previousChar == 0 && ispunct(nextChar):
+		// ["!] hmm... could be ["$1.95] or [</code>"!...]
+		*isOpen = false
+	case isspace(previousChar) && ispunct(nextChar):
+		// [ "!] looks more like [ "$1.95]
+		*isOpen = true
+	case ispunct(previousChar) && ispunct(nextChar):
+		// [!"!] context is not any help here, so toggle
+		*isOpen = !*isOpen
+	case /* isnormal(previousChar) && */ ispunct(nextChar):
+		// [a"!] is probably a close
+		*isOpen = false
+	case previousChar == 0 /* && isnormal(nextChar) */ :
+		// ["a] is probably an open
+		*isOpen = true
+	case isspace(previousChar) /* && isnormal(nextChar) */ :
+		// [ "a] this is one of the easy cases
+		*isOpen = true
+	case ispunct(previousChar) /* && isnormal(nextChar) */ :
+		// [!"a] is probably an open
+		*isOpen = true
+	default:
+		// [a'b] maybe a contraction?
+		*isOpen = false
+	}
+
+	// Note that with the limited lookahead, this non-breaking
+	// space will also be appended to single double quotes.
+	if addNBSP && !*isOpen {
+		out.WriteString("&nbsp;")
+	}
+
+	out.WriteByte('&')
+	if *isOpen {
+		out.WriteByte('l')
+	} else {
+		out.WriteByte('r')
+	}
+	out.WriteByte(quote)
+	out.WriteString("quo;")
+
+	if addNBSP && *isOpen {
+		out.WriteString("&nbsp;")
+	}
+
+	return true
+}
+
+func smartSingleQuote(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
+	if len(text) >= 2 {
+		t1 := tolower(text[1])
+
+		if t1 == '\'' {
+			nextChar := byte(0)
+			if len(text) >= 3 {
+				nextChar = text[2]
+			}
+			if smartQuoteHelper(out, previousChar, nextChar, 'd', &smrt.inDoubleQuote, false) {
+				return 1
+			}
+		}
+
+		if (t1 == 's' || t1 == 't' || t1 == 'm' || t1 == 'd') && (len(text) < 3 || wordBoundary(text[2])) {
+			out.WriteString("&rsquo;")
+			return 0
+		}
+
+		if len(text) >= 3 {
+			t2 := tolower(text[2])
+
+			if ((t1 == 'r' && t2 == 'e') || (t1 == 'l' && t2 == 'l') || (t1 == 'v' && t2 == 'e')) &&
+				(len(text) < 4 || wordBoundary(text[3])) {
+				out.WriteString("&rsquo;")
+				return 0
+			}
+		}
+	}
+
+	nextChar := byte(0)
+	if len(text) > 1 {
+		nextChar = text[1]
+	}
+	if smartQuoteHelper(out, previousChar, nextChar, 's', &smrt.inSingleQuote, false) {
+		return 0
+	}
+
+	out.WriteByte(text[0])
+	return 0
+}
+
+func smartParens(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
+	if len(text) >= 3 {
+		t1 := tolower(text[1])
+		t2 := tolower(text[2])
+
+		if t1 == 'c' && t2 == ')' {
+			out.WriteString("&copy;")
+			return 2
+		}
+
+		if t1 == 'r' && t2 == ')' {
+			out.WriteString("&reg;")
+			return 2
+		}
+
+		if len(text) >= 4 && t1 == 't' && t2 == 'm' && text[3] == ')' {
+			out.WriteString("&trade;")
+			return 3
+		}
+	}
+
+	out.WriteByte(text[0])
+	return 0
+}
+
+func smartDash(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
+	if len(text) >= 2 {
+		if text[1] == '-' {
+			out.WriteString("&mdash;")
+			return 1
+		}
+
+		if wordBoundary(previousChar) && wordBoundary(text[1]) {
+			out.WriteString("&ndash;")
+			return 0
+		}
+	}
+
+	out.WriteByte(text[0])
+	return 0
+}
+
+func smartDashLatex(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
+	if len(text) >= 3 && text[1] == '-' && text[2] == '-' {
+		out.WriteString("&mdash;")
+		return 2
+	}
+	if len(text) >= 2 && text[1] == '-' {
+		out.WriteString("&ndash;")
+		return 1
+	}
+
+	out.WriteByte(text[0])
+	return 0
+}
+
+func smartAmpVariant(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte, quote byte, addNBSP bool) int {
+	if bytes.HasPrefix(text, []byte("&quot;")) {
+		nextChar := byte(0)
+		if len(text) >= 7 {
+			nextChar = text[6]
+		}
+		if smartQuoteHelper(out, previousChar, nextChar, quote, &smrt.inDoubleQuote, addNBSP) {
+			return 5
+		}
+	}
+
+	if bytes.HasPrefix(text, []byte("&#0;")) {
+		return 3
+	}
+
+	out.WriteByte('&')
+	return 0
+}
+
+func smartAmp(angledQuotes, addNBSP bool) func(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
+	var quote byte = 'd'
+	if angledQuotes {
+		quote = 'a'
+	}
+
+	return func(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
+		return smartAmpVariant(out, smrt, previousChar, text, quote, addNBSP)
+	}
+}
+
+func smartPeriod(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
+	if len(text) >= 3 && text[1] == '.' && text[2] == '.' {
+		out.WriteString("&hellip;")
+		return 2
+	}
+
+	if len(text) >= 5 && text[1] == ' ' && text[2] == '.' && text[3] == ' ' && text[4] == '.' {
+		out.WriteString("&hellip;")
+		return 4
+	}
+
+	out.WriteByte(text[0])
+	return 0
+}
+
+func smartBacktick(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
+	if len(text) >= 2 && text[1] == '`' {
+		nextChar := byte(0)
+		if len(text) >= 3 {
+			nextChar = text[2]
+		}
+		if smartQuoteHelper(out, previousChar, nextChar, 'd', &smrt.inDoubleQuote, false) {
+			return 1
+		}
+	}
+
+	out.WriteByte(text[0])
+	return 0
+}
+
+func smartNumberGeneric(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
+	if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 {
+		// is it of the form digits/digits(word boundary)?, i.e., \d+/\d+\b
+		// note: check for regular slash (/) or fraction slash (⁄, 0x2044, or 0xe2 81 84 in utf-8)
+		//       and avoid changing dates like 1/23/2005 into fractions.
+		numEnd := 0
+		for len(text) > numEnd && isdigit(text[numEnd]) {
+			numEnd++
+		}
+		if numEnd == 0 {
+			out.WriteByte(text[0])
+			return 0
+		}
+		denStart := numEnd + 1
+		if len(text) > numEnd+3 && text[numEnd] == 0xe2 && text[numEnd+1] == 0x81 && text[numEnd+2] == 0x84 {
+			denStart = numEnd + 3
+		} else if len(text) < numEnd+2 || text[numEnd] != '/' {
+			out.WriteByte(text[0])
+			return 0
+		}
+		denEnd := denStart
+		for len(text) > denEnd && isdigit(text[denEnd]) {
+			denEnd++
+		}
+		if denEnd == denStart {
+			out.WriteByte(text[0])
+			return 0
+		}
+		if len(text) == denEnd || wordBoundary(text[denEnd]) && text[denEnd] != '/' {
+			out.WriteString("<sup>")
+			out.Write(text[:numEnd])
+			out.WriteString("</sup>&frasl;<sub>")
+			out.Write(text[denStart:denEnd])
+			out.WriteString("</sub>")
+			return denEnd - 1
+		}
+	}
+
+	out.WriteByte(text[0])
+	return 0
+}
+
+func smartNumber(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
+	if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 {
+		if text[0] == '1' && text[1] == '/' && text[2] == '2' {
+			if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' {
+				out.WriteString("&frac12;")
+				return 2
+			}
+		}
+
+		if text[0] == '1' && text[1] == '/' && text[2] == '4' {
+			if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 5 && tolower(text[3]) == 't' && tolower(text[4]) == 'h') {
+				out.WriteString("&frac14;")
+				return 2
+			}
+		}
+
+		if text[0] == '3' && text[1] == '/' && text[2] == '4' {
+			if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 6 && tolower(text[3]) == 't' && tolower(text[4]) == 'h' && tolower(text[5]) == 's') {
+				out.WriteString("&frac34;")
+				return 2
+			}
+		}
+	}
+
+	out.WriteByte(text[0])
+	return 0
+}
+
+func smartDoubleQuoteVariant(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte, quote byte) int {
+	nextChar := byte(0)
+	if len(text) > 1 {
+		nextChar = text[1]
+	}
+	if !smartQuoteHelper(out, previousChar, nextChar, quote, &smrt.inDoubleQuote, false) {
+		out.WriteString("&quot;")
+	}
+
+	return 0
+}
+
+func smartDoubleQuote(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
+	return smartDoubleQuoteVariant(out, smrt, previousChar, text, 'd')
+}
+
+func smartAngledDoubleQuote(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
+	return smartDoubleQuoteVariant(out, smrt, previousChar, text, 'a')
+}
+
+func smartLeftAngle(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int {
+	i := 0
+
+	for i < len(text) && text[i] != '>' {
+		i++
+	}
+
+	out.Write(text[:i+1])
+	return i
+}
+
+type smartCallback func(out *bytes.Buffer, smrt *smartypantsData, previousChar byte, text []byte) int
+
+type smartypantsRenderer [256]smartCallback
+
+var (
+	smartAmpAngled      = smartAmp(true, false)
+	smartAmpAngledNBSP  = smartAmp(true, true)
+	smartAmpRegular     = smartAmp(false, false)
+	smartAmpRegularNBSP = smartAmp(false, true)
+)
+
+func smartypants(flags int) *smartypantsRenderer {
+	r := new(smartypantsRenderer)
+	addNBSP := flags&HTML_SMARTYPANTS_QUOTES_NBSP != 0
+	if flags&HTML_SMARTYPANTS_ANGLED_QUOTES == 0 {
+		r['"'] = smartDoubleQuote
+		if !addNBSP {
+			r['&'] = smartAmpRegular
+		} else {
+			r['&'] = smartAmpRegularNBSP
+		}
+	} else {
+		r['"'] = smartAngledDoubleQuote
+		if !addNBSP {
+			r['&'] = smartAmpAngled
+		} else {
+			r['&'] = smartAmpAngledNBSP
+		}
+	}
+	r['\''] = smartSingleQuote
+	r['('] = smartParens
+	if flags&HTML_SMARTYPANTS_DASHES != 0 {
+		if flags&HTML_SMARTYPANTS_LATEX_DASHES == 0 {
+			r['-'] = smartDash
+		} else {
+			r['-'] = smartDashLatex
+		}
+	}
+	r['.'] = smartPeriod
+	if flags&HTML_SMARTYPANTS_FRACTIONS == 0 {
+		r['1'] = smartNumber
+		r['3'] = smartNumber
+	} else {
+		for ch := '1'; ch <= '9'; ch++ {
+			r[ch] = smartNumberGeneric
+		}
+	}
+	r['<'] = smartLeftAngle
+	r['`'] = smartBacktick
+	return r
+}
diff --git a/vendor/github.com/spf13/cobra/.gitignore b/vendor/github.com/spf13/cobra/.gitignore
new file mode 100644
index 00000000..3b053c59
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/.gitignore
@@ -0,0 +1,38 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+# Vim files https://github.com/github/gitignore/blob/master/Global/Vim.gitignore
+# swap
+[._]*.s[a-w][a-z]
+[._]s[a-w][a-z]
+# session
+Session.vim
+# temporary
+.netrwhist
+*~
+# auto-generated tag files
+tags
+
+*.exe
+
+cobra.test
+
+.idea/*
diff --git a/vendor/github.com/spf13/cobra/.mailmap b/vendor/github.com/spf13/cobra/.mailmap
new file mode 100644
index 00000000..94ec5306
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/.mailmap
@@ -0,0 +1,3 @@
+Steve Francia <steve.francia@gmail.com>
+Bjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>
+Fabiano Franz <ffranz@redhat.com>                   <contact@fabianofranz.com>
diff --git a/vendor/github.com/spf13/cobra/.travis.yml b/vendor/github.com/spf13/cobra/.travis.yml
new file mode 100644
index 00000000..38b85f49
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/.travis.yml
@@ -0,0 +1,31 @@
+language: go
+
+stages:
+  - diff
+  - test
+
+go:
+  - 1.10.x
+  - 1.11.x
+  - 1.12.x
+  - tip
+
+matrix:
+  allow_failures:
+    - go: tip
+  include:
+    - stage: diff
+      go: 1.12.x
+      script: diff -u <(echo -n) <(gofmt -d -s .)
+
+before_install:
+  - mkdir -p bin
+  - curl -Lso bin/shellcheck https://github.com/caarlos0/shellcheck-docker/releases/download/v0.6.0/shellcheck
+  - chmod +x bin/shellcheck
+  - go get -u github.com/kyoh86/richgo
+script:
+  - PATH=$PATH:$PWD/bin richgo test -v ./...
+  - go build
+  - if [ -z $NOVET ]; then
+      diff -u <(echo -n) <(go vet . 2>&1 | grep -vE 'ExampleCommand|bash_completions.*Fprint');
+    fi
diff --git a/vendor/github.com/spf13/cobra/LICENSE.txt b/vendor/github.com/spf13/cobra/LICENSE.txt
new file mode 100644
index 00000000..298f0e26
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/LICENSE.txt
@@ -0,0 +1,174 @@
+                                Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md
new file mode 100644
index 00000000..60c5a425
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/README.md
@@ -0,0 +1,741 @@
+![cobra logo](https://cloud.githubusercontent.com/assets/173412/10886352/ad566232-814f-11e5-9cd0-aa101788c117.png)
+
+Cobra is both a library for creating powerful modern CLI applications and a program to generate applications and command files.
+
+Many of the most widely used Go projects are built using Cobra, such as:
+[Kubernetes](http://kubernetes.io/),
+[Hugo](http://gohugo.io),
+[rkt](https://github.com/coreos/rkt),
+[etcd](https://github.com/coreos/etcd),
+[Moby (former Docker)](https://github.com/moby/moby),
+[Docker (distribution)](https://github.com/docker/distribution),
+[OpenShift](https://www.openshift.com/),
+[Delve](https://github.com/derekparker/delve),
+[GopherJS](http://www.gopherjs.org/),
+[CockroachDB](http://www.cockroachlabs.com/),
+[Bleve](http://www.blevesearch.com/),
+[ProjectAtomic (enterprise)](http://www.projectatomic.io/),
+[Giant Swarm's gsctl](https://github.com/giantswarm/gsctl),
+[Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack),
+[rclone](http://rclone.org/),
+[nehm](https://github.com/bogem/nehm),
+[Pouch](https://github.com/alibaba/pouch),
+[Istio](https://istio.io),
+[Prototool](https://github.com/uber/prototool),
+[mattermost-server](https://github.com/mattermost/mattermost-server),
+[Gardener](https://github.com/gardener/gardenctl),
+etc.
+
+[![Build Status](https://travis-ci.org/spf13/cobra.svg "Travis CI status")](https://travis-ci.org/spf13/cobra)
+[![CircleCI status](https://circleci.com/gh/spf13/cobra.png?circle-token=:circle-token "CircleCI status")](https://circleci.com/gh/spf13/cobra)
+[![GoDoc](https://godoc.org/github.com/spf13/cobra?status.svg)](https://godoc.org/github.com/spf13/cobra)
+
+# Table of Contents
+
+- [Overview](#overview)
+- [Concepts](#concepts)
+  * [Commands](#commands)
+  * [Flags](#flags)
+- [Installing](#installing)
+- [Getting Started](#getting-started)
+  * [Using the Cobra Generator](#using-the-cobra-generator)
+  * [Using the Cobra Library](#using-the-cobra-library)
+  * [Working with Flags](#working-with-flags)
+  * [Positional and Custom Arguments](#positional-and-custom-arguments)
+  * [Example](#example)
+  * [Help Command](#help-command)
+  * [Usage Message](#usage-message)
+  * [PreRun and PostRun Hooks](#prerun-and-postrun-hooks)
+  * [Suggestions when "unknown command" happens](#suggestions-when-unknown-command-happens)
+  * [Generating documentation for your command](#generating-documentation-for-your-command)
+  * [Generating bash completions](#generating-bash-completions)
+  * [Generating zsh completions](#generating-zsh-completions)
+- [Contributing](#contributing)
+- [License](#license)
+
+# Overview
+
+Cobra is a library providing a simple interface to create powerful, modern CLIs
+similar to the git & go tools.
+
+Cobra is also an application that generates scaffolding so you can rapidly
+develop a Cobra-based application.
+
+Cobra provides:
+* Easy subcommand-based CLIs: `app server`, `app fetch`, etc.
+* Fully POSIX-compliant flags (including short & long versions)
+* Nested subcommands
+* Global, local and cascading flags
+* Easy generation of applications & commands with `cobra init appname` & `cobra add cmdname`
+* Intelligent suggestions (`app srver`... did you mean `app server`?)
+* Automatic help generation for commands and flags
+* Automatic help flag recognition of `-h`, `--help`, etc.
+* Automatically generated bash autocomplete for your application
+* Automatically generated man pages for your application
+* Command aliases so you can change things without breaking them
+* The flexibility to define your own help, usage, etc.
+* Optional tight integration with [viper](http://github.com/spf13/viper) for 12-factor apps
+
+# Concepts
+
+Cobra is built on a structure of commands, arguments & flags.
+
+**Commands** represent actions, **Args** are things and **Flags** are modifiers for those actions.
+
+The best applications will read like sentences when used. As a result, users
+will intuitively know how to interact with the application.
+
+The pattern to follow is
+`APPNAME VERB NOUN --ADJECTIVE`
+or
+`APPNAME COMMAND ARG --FLAG`.
+
+A few good real world examples may better illustrate this point.
+
+In the following example, 'server' is a command, and 'port' is a flag:
+
+    hugo server --port=1313
+
+In the next command, we are telling Git to clone the URL bare:
+
+    git clone URL --bare
+
+## Commands
+
+Command is the central point of the application. Each interaction that
+the application supports will be contained in a Command. A command can
+have child commands and optionally run an action.
+
+In the example above, 'server' is the command.
+
+[More about cobra.Command](https://godoc.org/github.com/spf13/cobra#Command)
+
+## Flags
+
+A flag is a way to modify the behavior of a command. Cobra supports
+fully POSIX-compliant flags as well as the Go [flag package](https://golang.org/pkg/flag/).
+A Cobra command can define flags that persist through to child commands
+and flags that are only available to that command.
+
+In the example above, 'port' is the flag.
+
+Flag functionality is provided by the [pflag
+library](https://github.com/spf13/pflag), a fork of the flag standard library
+which maintains the same interface while adding POSIX compliance.
+
+# Installing
+Using Cobra is easy. First, use `go get` to install the latest version
+of the library. This command will install the `cobra` generator executable
+along with the library and its dependencies:
+
+    go get -u github.com/spf13/cobra/cobra
+
+Next, include Cobra in your application:
+
+```go
+import "github.com/spf13/cobra"
+```
+
+# Getting Started
+
+While you are welcome to provide your own organization, a Cobra-based
+application will typically follow this organizational structure:
+
+```
+  ▾ appName/
+    ▾ cmd/
+        add.go
+        your.go
+        commands.go
+        here.go
+      main.go
+```
+
+In a Cobra app, typically the main.go file is very bare. It serves one purpose: initializing Cobra.
+
+```go
+package main
+
+import (
+  "{pathToYourApp}/cmd"
+)
+
+func main() {
+  cmd.Execute()
+}
+```
+
+## Using the Cobra Generator
+
+Cobra provides its own program that will create your application and add any
+commands you want. It's the easiest way to incorporate Cobra into your application.
+
+[Here](https://github.com/spf13/cobra/blob/master/cobra/README.md) you can find more information about it.
+
+## Using the Cobra Library
+
+To manually implement Cobra you need to create a bare main.go file and a rootCmd file.
+You will optionally provide additional commands as you see fit.
+
+### Create rootCmd
+
+Cobra doesn't require any special constructors. Simply create your commands.
+
+Ideally you place this in app/cmd/root.go:
+
+```go
+var rootCmd = &cobra.Command{
+  Use:   "hugo",
+  Short: "Hugo is a very fast static site generator",
+  Long: `A Fast and Flexible Static Site Generator built with
+                love by spf13 and friends in Go.
+                Complete documentation is available at http://hugo.spf13.com`,
+  Run: func(cmd *cobra.Command, args []string) {
+    // Do Stuff Here
+  },
+}
+
+func Execute() {
+  if err := rootCmd.Execute(); err != nil {
+    fmt.Println(err)
+    os.Exit(1)
+  }
+}
+```
+
+You will additionally define flags and handle configuration in your init() function.
+
+For example cmd/root.go:
+
+```go
+import (
+  "fmt"
+  "os"
+
+  homedir "github.com/mitchellh/go-homedir"
+  "github.com/spf13/cobra"
+  "github.com/spf13/viper"
+)
+
+func init() {
+  cobra.OnInitialize(initConfig)
+  rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)")
+  rootCmd.PersistentFlags().StringVarP(&projectBase, "projectbase", "b", "", "base project directory eg. github.com/spf13/")
+  rootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "Author name for copyright attribution")
+  rootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "Name of license for the project (can provide `licensetext` in config)")
+  rootCmd.PersistentFlags().Bool("viper", true, "Use Viper for configuration")
+  viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author"))
+  viper.BindPFlag("projectbase", rootCmd.PersistentFlags().Lookup("projectbase"))
+  viper.BindPFlag("useViper", rootCmd.PersistentFlags().Lookup("viper"))
+  viper.SetDefault("author", "NAME HERE <EMAIL ADDRESS>")
+  viper.SetDefault("license", "apache")
+}
+
+func initConfig() {
+  // Don't forget to read config either from cfgFile or from home directory!
+  if cfgFile != "" {
+    // Use config file from the flag.
+    viper.SetConfigFile(cfgFile)
+  } else {
+    // Find home directory.
+    home, err := homedir.Dir()
+    if err != nil {
+      fmt.Println(err)
+      os.Exit(1)
+    }
+
+    // Search config in home directory with name ".cobra" (without extension).
+    viper.AddConfigPath(home)
+    viper.SetConfigName(".cobra")
+  }
+
+  if err := viper.ReadInConfig(); err != nil {
+    fmt.Println("Can't read config:", err)
+    os.Exit(1)
+  }
+}
+```
+
+### Create your main.go
+
+With the root command you need to have your main function execute it.
+Execute should be run on the root for clarity, though it can be called on any command.
+
+In a Cobra app, typically the main.go file is very bare. It serves one purpose: to initialize Cobra.
+
+```go
+package main
+
+import (
+  "{pathToYourApp}/cmd"
+)
+
+func main() {
+  cmd.Execute()
+}
+```
+
+### Create additional commands
+
+Additional commands can be defined and typically are each given their own file
+inside of the cmd/ directory.
+
+If you wanted to create a version command you would create cmd/version.go and
+populate it with the following:
+
+```go
+package cmd
+
+import (
+  "fmt"
+
+  "github.com/spf13/cobra"
+)
+
+func init() {
+  rootCmd.AddCommand(versionCmd)
+}
+
+var versionCmd = &cobra.Command{
+  Use:   "version",
+  Short: "Print the version number of Hugo",
+  Long:  `All software has versions. This is Hugo's`,
+  Run: func(cmd *cobra.Command, args []string) {
+    fmt.Println("Hugo Static Site Generator v0.9 -- HEAD")
+  },
+}
+```
+
+## Working with Flags
+
+Flags provide modifiers to control how the action command operates.
+
+### Assign flags to a command
+
+Since flags are defined and used in different locations, we need to define a
+variable outside the command, with a scope wide enough for the flag to be
+assigned to it.
+
+```go
+var Verbose bool
+var Source string
+```
+
+There are two different approaches to assign a flag.
+
+### Persistent Flags
+
+A flag can be 'persistent', meaning that it will be available to the
+command it's assigned to as well as every command under that command. For
+global flags, assign a flag as a persistent flag on the root.
+
+```go
+rootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output")
+```
+
+### Local Flags
+
+A flag can also be assigned locally, in which case it only applies to that specific command.
+
+```go
+localCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from")
+```
+
+### Local Flag on Parent Commands
+
+By default, Cobra only parses local flags on the target command; any local flags on
+parent commands are ignored. By enabling `Command.TraverseChildren`, Cobra will
+parse local flags on each command before executing the target command.
+
+```go
+command := cobra.Command{
+  Use: "print [OPTIONS] [COMMANDS]",
+  TraverseChildren: true,
+}
+```
+
+### Bind Flags with Config
+
+You can also bind your flags with [viper](https://github.com/spf13/viper):
+```go
+var author string
+
+func init() {
+  rootCmd.PersistentFlags().StringVar(&author, "author", "YOUR NAME", "Author name for copyright attribution")
+  viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author"))
+}
+```
+
+In this example the persistent flag `author` is bound with `viper`.
+**Note** that the variable `author` will not be set to the value from the config
+file when the `--author` flag is not provided by the user.
+
+More details can be found in the [viper documentation](https://github.com/spf13/viper#working-with-flags).
+
+### Required flags
+
+Flags are optional by default. If instead you wish your command to report an error
+when a flag has not been set, mark it as required:
+```go
+rootCmd.Flags().StringVarP(&Region, "region", "r", "", "AWS region (required)")
+rootCmd.MarkFlagRequired("region")
+```
+
+## Positional and Custom Arguments
+
+Validation of positional arguments can be specified using the `Args` field
+of `Command`.
+
+The following validators are built in:
+
+- `NoArgs` - the command will report an error if there are any positional args.
+- `ArbitraryArgs` - the command will accept any args.
+- `OnlyValidArgs` - the command will report an error if there are any positional args that are not in the `ValidArgs` field of `Command`.
+- `MinimumNArgs(int)` - the command will report an error if there are not at least N positional args.
+- `MaximumNArgs(int)` - the command will report an error if there are more than N positional args.
+- `ExactArgs(int)` - the command will report an error if there are not exactly N positional args.
+- `ExactValidArgs(int)` - the command will report an error if there are not exactly N positional args OR if there are any positional args that are not in the `ValidArgs` field of `Command`.
+- `RangeArgs(min, max)` - the command will report an error if the number of args is not between the minimum and maximum number of expected args.
+
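+A built-in validator can be assigned directly to the `Args` field. A minimal
+sketch (the `deploy` command here is hypothetical):
+
+```go
+var deployCmd = &cobra.Command{
+  Use:   "deploy [target]",
+  Short: "Deploy to a single target",
+  Args:  cobra.ExactArgs(1),
+  Run: func(cmd *cobra.Command, args []string) {
+    fmt.Println("deploying to", args[0])
+  },
+}
+```
+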
+An example of setting the custom validator:
+
+```go
+var cmd = &cobra.Command{
+  Short: "hello",
+  Args: func(cmd *cobra.Command, args []string) error {
+    if len(args) < 1 {
+      return errors.New("requires a color argument")
+    }
+    if myapp.IsValidColor(args[0]) {
+      return nil
+    }
+    return fmt.Errorf("invalid color specified: %s", args[0])
+  },
+  Run: func(cmd *cobra.Command, args []string) {
+    fmt.Println("Hello, World!")
+  },
+}
+```
+
+## Example
+
+In the example below, we have defined three commands. Two are at the top level
+and one (cmdTimes) is a child of one of the top commands. In this case the root
+is not executable, meaning that a subcommand is required. This is accomplished
+by not providing a 'Run' for the 'rootCmd'.
+
+We have only defined one flag for a single command.
+
+More documentation about flags is available at https://github.com/spf13/pflag
+
+```go
+package main
+
+import (
+  "fmt"
+  "strings"
+
+  "github.com/spf13/cobra"
+)
+
+func main() {
+  var echoTimes int
+
+  var cmdPrint = &cobra.Command{
+    Use:   "print [string to print]",
+    Short: "Print anything to the screen",
+    Long: `print is for printing anything back to the screen.
+For many years people have printed back to the screen.`,
+    Args: cobra.MinimumNArgs(1),
+    Run: func(cmd *cobra.Command, args []string) {
+      fmt.Println("Print: " + strings.Join(args, " "))
+    },
+  }
+
+  var cmdEcho = &cobra.Command{
+    Use:   "echo [string to echo]",
+    Short: "Echo anything to the screen",
+    Long: `echo is for echoing anything back.
+Echo works a lot like print, except it has a child command.`,
+    Args: cobra.MinimumNArgs(1),
+    Run: func(cmd *cobra.Command, args []string) {
+      fmt.Println("Print: " + strings.Join(args, " "))
+    },
+  }
+
+  var cmdTimes = &cobra.Command{
+    Use:   "times [string to echo]",
+    Short: "Echo anything to the screen more times",
+    Long: `echo things multiple times back to the user by providing
+a count and a string.`,
+    Args: cobra.MinimumNArgs(1),
+    Run: func(cmd *cobra.Command, args []string) {
+      for i := 0; i < echoTimes; i++ {
+        fmt.Println("Echo: " + strings.Join(args, " "))
+      }
+    },
+  }
+
+  cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input")
+
+  var rootCmd = &cobra.Command{Use: "app"}
+  rootCmd.AddCommand(cmdPrint, cmdEcho)
+  cmdEcho.AddCommand(cmdTimes)
+  rootCmd.Execute()
+}
+```
+
+For a more complete example of a larger application, please check out [Hugo](http://gohugo.io/).
+
+## Help Command
+
+Cobra automatically adds a help command to your application when you have subcommands.
+This will be called when a user runs 'app help'. Additionally, help will
+support all other commands as input. Say, for instance, you have a command called
+'create'; without any additional configuration, Cobra will work when 'app help
+create' is called. Every command will automatically have the '--help' flag added.
+
+### Example
+
+The following output is automatically generated by Cobra. Nothing beyond the
+command and flag definitions is needed.
+
+    $ cobra help
+
+    Cobra is a CLI library for Go that empowers applications.
+    This application is a tool to generate the needed files
+    to quickly create a Cobra application.
+
+    Usage:
+      cobra [command]
+
+    Available Commands:
+      add         Add a command to a Cobra Application
+      help        Help about any command
+      init        Initialize a Cobra Application
+
+    Flags:
+      -a, --author string    author name for copyright attribution (default "YOUR NAME")
+          --config string    config file (default is $HOME/.cobra.yaml)
+      -h, --help             help for cobra
+      -l, --license string   name of license for the project
+          --viper            use Viper for configuration (default true)
+
+    Use "cobra [command] --help" for more information about a command.
+
+
+Help is just a command like any other. There is no special logic or behavior
+around it. In fact, you can provide your own if you want.
+
+### Defining your own help
+
+You can provide your own Help command or your own template for the default command to use
+with the following functions:
+
+```go
+cmd.SetHelpCommand(cmd *Command)
+cmd.SetHelpFunc(f func(*Command, []string))
+cmd.SetHelpTemplate(s string)
+```
+
+The latter two will also apply to any child commands.
+
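+For example, a minimal sketch of overriding the default help template, using the
+standard `Short` and `UsageString` members of `Command`:
+
+```go
+rootCmd.SetHelpTemplate(`{{.Short}}
+
+{{.UsageString}}`)
+```
+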
+## Usage Message
+
+When the user provides an invalid flag or invalid command, Cobra responds by
+showing the user the 'usage'.
+
+### Example
+You may recognize this from the help above. That's because the default help
+embeds the usage as part of its output.
+
+    $ cobra --invalid
+    Error: unknown flag: --invalid
+    Usage:
+      cobra [command]
+
+    Available Commands:
+      add         Add a command to a Cobra Application
+      help        Help about any command
+      init        Initialize a Cobra Application
+
+    Flags:
+      -a, --author string    author name for copyright attribution (default "YOUR NAME")
+          --config string    config file (default is $HOME/.cobra.yaml)
+      -h, --help             help for cobra
+      -l, --license string   name of license for the project
+          --viper            use Viper for configuration (default true)
+
+    Use "cobra [command] --help" for more information about a command.
+
+### Defining your own usage
+You can provide your own usage function or template for Cobra to use.
+Like help, the function and template are overridable through public methods:
+
+```go
+cmd.SetUsageFunc(f func(*Command) error)
+cmd.SetUsageTemplate(s string)
+```
+
+## Version Flag
+
+Cobra adds a top-level '--version' flag if the Version field is set on the root command.
+Running an application with the '--version' flag will print the version to stdout using
+the version template. The template can be customized using the
+`cmd.SetVersionTemplate(s string)` function.
+
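+A minimal sketch, assuming the `rootCmd` defined earlier and an illustrative
+version number:
+
+```go
+rootCmd.Version = "0.9.0"
+// Optional: customize what --version prints.
+rootCmd.SetVersionTemplate("{{.Name}} version {{.Version}}\n")
+```
+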
+## PreRun and PostRun Hooks
+
+It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`.  The `Persistent*Run` functions will be inherited by children if they do not declare their own.  These functions are run in the following order:
+
+- `PersistentPreRun`
+- `PreRun`
+- `Run`
+- `PostRun`
+- `PersistentPostRun`
+
+An example of two commands which use all of these features is below.  When the subcommand is executed, it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`:
+
+```go
+package main
+
+import (
+  "fmt"
+
+  "github.com/spf13/cobra"
+)
+
+func main() {
+
+  var rootCmd = &cobra.Command{
+    Use:   "root [sub]",
+    Short: "My root command",
+    PersistentPreRun: func(cmd *cobra.Command, args []string) {
+      fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args)
+    },
+    PreRun: func(cmd *cobra.Command, args []string) {
+      fmt.Printf("Inside rootCmd PreRun with args: %v\n", args)
+    },
+    Run: func(cmd *cobra.Command, args []string) {
+      fmt.Printf("Inside rootCmd Run with args: %v\n", args)
+    },
+    PostRun: func(cmd *cobra.Command, args []string) {
+      fmt.Printf("Inside rootCmd PostRun with args: %v\n", args)
+    },
+    PersistentPostRun: func(cmd *cobra.Command, args []string) {
+      fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args)
+    },
+  }
+
+  var subCmd = &cobra.Command{
+    Use:   "sub [no options!]",
+    Short: "My subcommand",
+    PreRun: func(cmd *cobra.Command, args []string) {
+      fmt.Printf("Inside subCmd PreRun with args: %v\n", args)
+    },
+    Run: func(cmd *cobra.Command, args []string) {
+      fmt.Printf("Inside subCmd Run with args: %v\n", args)
+    },
+    PostRun: func(cmd *cobra.Command, args []string) {
+      fmt.Printf("Inside subCmd PostRun with args: %v\n", args)
+    },
+    PersistentPostRun: func(cmd *cobra.Command, args []string) {
+      fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args)
+    },
+  }
+
+  rootCmd.AddCommand(subCmd)
+
+  rootCmd.SetArgs([]string{""})
+  rootCmd.Execute()
+  fmt.Println()
+  rootCmd.SetArgs([]string{"sub", "arg1", "arg2"})
+  rootCmd.Execute()
+}
+```
+
+Output:
+```
+Inside rootCmd PersistentPreRun with args: []
+Inside rootCmd PreRun with args: []
+Inside rootCmd Run with args: []
+Inside rootCmd PostRun with args: []
+Inside rootCmd PersistentPostRun with args: []
+
+Inside rootCmd PersistentPreRun with args: [arg1 arg2]
+Inside subCmd PreRun with args: [arg1 arg2]
+Inside subCmd Run with args: [arg1 arg2]
+Inside subCmd PostRun with args: [arg1 arg2]
+Inside subCmd PersistentPostRun with args: [arg1 arg2]
+```
+
+## Suggestions when "unknown command" happens
+
+Cobra will print automatic suggestions when "unknown command" errors happen. This allows Cobra to behave similarly to the `git` command when a typo happens. For example:
+
+```
+$ hugo srever
+Error: unknown command "srever" for "hugo"
+
+Did you mean this?
+        server
+
+Run 'hugo --help' for usage.
+```
+
+Suggestions are generated automatically based on every registered subcommand and use an implementation of [Levenshtein distance](http://en.wikipedia.org/wiki/Levenshtein_distance). Every registered command that matches a minimum distance of 2 (ignoring case) will be displayed as a suggestion.
+
+If you need to disable suggestions or tweak the string distance in your command, use:
+
+```go
+command.DisableSuggestions = true
+```
+
+or
+
+```go
+command.SuggestionsMinimumDistance = 1
+```
+
+You can also explicitly set names for which a given command will be suggested using the `SuggestFor` attribute. This allows suggestions for strings that are not close in terms of string distance but make sense in your set of commands, and for which you don't want aliases. Example:
+
+```
+$ kubectl remove
+Error: unknown command "remove" for "kubectl"
+
+Did you mean this?
+        delete
+
+Run 'kubectl help' for usage.
+```
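+
+A minimal sketch (with a hypothetical `deleteCmd`) that produces the behavior above:
+
+```go
+var deleteCmd = &cobra.Command{
+  Use:        "delete",
+  Short:      "Delete a resource",
+  SuggestFor: []string{"remove", "rm"},
+  Run:        func(cmd *cobra.Command, args []string) {},
+}
+```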
+
+## Generating documentation for your command
+
+Cobra can generate documentation based on subcommands, flags, etc. in the following formats:
+
+- [Markdown](doc/md_docs.md)
+- [ReStructured Text](doc/rest_docs.md)
+- [Man Page](doc/man_docs.md)
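+
+For example, a minimal sketch of generating Markdown documentation for `rootCmd`
+into a hypothetical `./docs` directory:
+
+```go
+import "github.com/spf13/cobra/doc"
+
+func genDocs() error {
+  // Writes one Markdown file per command under ./docs.
+  return doc.GenMarkdownTree(rootCmd, "./docs")
+}
+```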
+
+## Generating bash completions
+
+Cobra can generate a bash-completion file. If you add more information to your command, these completions can be amazingly powerful and flexible.  Read more about it in [Bash Completions](bash_completions.md).
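+
+A minimal sketch of writing the completion script to a file (the filename here is arbitrary):
+
+```go
+if err := rootCmd.GenBashCompletionFile("app_completion.sh"); err != nil {
+  fmt.Println(err)
+}
+```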
+
+## Generating zsh completions
+
+Cobra can generate a zsh-completion file. Read more about it in
+[Zsh Completions](zsh_completions.md).
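+
+A minimal sketch, analogous to the bash example above:
+
+```go
+if err := rootCmd.GenZshCompletionFile("_app"); err != nil {
+  fmt.Println(err)
+}
+```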
+
+# Contributing
+
+1. Fork it
+2. Download your fork to your PC (`git clone https://github.com/your_username/cobra && cd cobra`)
+3. Create your feature branch (`git checkout -b my-new-feature`)
+4. Make changes and add them (`git add .`)
+5. Commit your changes (`git commit -m 'Add some feature'`)
+6. Push to the branch (`git push origin my-new-feature`)
+7. Create a new pull request
+
+# License
+
+Cobra is released under the Apache 2.0 license. See [LICENSE.txt](https://github.com/spf13/cobra/blob/master/LICENSE.txt)
diff --git a/vendor/github.com/spf13/cobra/args.go b/vendor/github.com/spf13/cobra/args.go
new file mode 100644
index 00000000..c4d820b8
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/args.go
@@ -0,0 +1,101 @@
+package cobra
+
+import (
+	"fmt"
+)
+
+type PositionalArgs func(cmd *Command, args []string) error
+
+// Legacy arg validation has the following behaviour:
+// - root commands with no subcommands can take arbitrary arguments
+// - root commands with subcommands will do subcommand validity checking
+// - subcommands will always accept arbitrary arguments
+func legacyArgs(cmd *Command, args []string) error {
+	// no subcommand, always take args
+	if !cmd.HasSubCommands() {
+		return nil
+	}
+
+	// root command with subcommands, do subcommand checking.
+	if !cmd.HasParent() && len(args) > 0 {
+		return fmt.Errorf("unknown command %q for %q%s", args[0], cmd.CommandPath(), cmd.findSuggestions(args[0]))
+	}
+	return nil
+}
+
+// NoArgs returns an error if any args are included.
+func NoArgs(cmd *Command, args []string) error {
+	if len(args) > 0 {
+		return fmt.Errorf("unknown command %q for %q", args[0], cmd.CommandPath())
+	}
+	return nil
+}
+
+// OnlyValidArgs returns an error if any args are not in the list of ValidArgs.
+func OnlyValidArgs(cmd *Command, args []string) error {
+	if len(cmd.ValidArgs) > 0 {
+		for _, v := range args {
+			if !stringInSlice(v, cmd.ValidArgs) {
+				return fmt.Errorf("invalid argument %q for %q%s", v, cmd.CommandPath(), cmd.findSuggestions(args[0]))
+			}
+		}
+	}
+	return nil
+}
+
+// ArbitraryArgs never returns an error.
+func ArbitraryArgs(cmd *Command, args []string) error {
+	return nil
+}
+
+// MinimumNArgs returns an error if there is not at least N args.
+func MinimumNArgs(n int) PositionalArgs {
+	return func(cmd *Command, args []string) error {
+		if len(args) < n {
+			return fmt.Errorf("requires at least %d arg(s), only received %d", n, len(args))
+		}
+		return nil
+	}
+}
+
+// MaximumNArgs returns an error if there are more than N args.
+func MaximumNArgs(n int) PositionalArgs {
+	return func(cmd *Command, args []string) error {
+		if len(args) > n {
+			return fmt.Errorf("accepts at most %d arg(s), received %d", n, len(args))
+		}
+		return nil
+	}
+}
+
+// ExactArgs returns an error if there are not exactly n args.
+func ExactArgs(n int) PositionalArgs {
+	return func(cmd *Command, args []string) error {
+		if len(args) != n {
+			return fmt.Errorf("accepts %d arg(s), received %d", n, len(args))
+		}
+		return nil
+	}
+}
+
+// ExactValidArgs returns an error if
+// there are not exactly N positional args OR
+// there are any positional args that are not in the `ValidArgs` field of `Command`
+func ExactValidArgs(n int) PositionalArgs {
+	return func(cmd *Command, args []string) error {
+		if err := ExactArgs(n)(cmd, args); err != nil {
+			return err
+		}
+		return OnlyValidArgs(cmd, args)
+	}
+}
+
+// RangeArgs returns an error if the number of args is not within the expected range.
+func RangeArgs(min int, max int) PositionalArgs {
+	return func(cmd *Command, args []string) error {
+		if len(args) < min || len(args) > max {
+			return fmt.Errorf("accepts between %d and %d arg(s), received %d", min, max, len(args))
+		}
+		return nil
+	}
+}
diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go
new file mode 100644
index 00000000..57bb8e1b
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/bash_completions.go
@@ -0,0 +1,547 @@
+package cobra
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+	"sort"
+	"strings"
+
+	"github.com/spf13/pflag"
+)
+
+// Annotations for Bash completion.
+const (
+	BashCompFilenameExt     = "cobra_annotation_bash_completion_filename_extensions"
+	BashCompCustom          = "cobra_annotation_bash_completion_custom"
+	BashCompOneRequiredFlag = "cobra_annotation_bash_completion_one_required_flag"
+	BashCompSubdirsInDir    = "cobra_annotation_bash_completion_subdirs_in_dir"
+)
+
+func writePreamble(buf *bytes.Buffer, name string) {
+	buf.WriteString(fmt.Sprintf("# bash completion for %-36s -*- shell-script -*-\n", name))
+	buf.WriteString(fmt.Sprintf(`
+__%[1]s_debug()
+{
+    if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then
+        echo "$*" >> "${BASH_COMP_DEBUG_FILE}"
+    fi
+}
+
+# Homebrew on Macs have version 1.3 of bash-completion which doesn't include
+# _init_completion. This is a very minimal version of that function.
+__%[1]s_init_completion()
+{
+    COMPREPLY=()
+    _get_comp_words_by_ref "$@" cur prev words cword
+}
+
+__%[1]s_index_of_word()
+{
+    local w word=$1
+    shift
+    index=0
+    for w in "$@"; do
+        [[ $w = "$word" ]] && return
+        index=$((index+1))
+    done
+    index=-1
+}
+
+__%[1]s_contains_word()
+{
+    local w word=$1; shift
+    for w in "$@"; do
+        [[ $w = "$word" ]] && return
+    done
+    return 1
+}
+
+__%[1]s_handle_reply()
+{
+    __%[1]s_debug "${FUNCNAME[0]}"
+    case $cur in
+        -*)
+            if [[ $(type -t compopt) = "builtin" ]]; then
+                compopt -o nospace
+            fi
+            local allflags
+            if [ ${#must_have_one_flag[@]} -ne 0 ]; then
+                allflags=("${must_have_one_flag[@]}")
+            else
+                allflags=("${flags[*]} ${two_word_flags[*]}")
+            fi
+            COMPREPLY=( $(compgen -W "${allflags[*]}" -- "$cur") )
+            if [[ $(type -t compopt) = "builtin" ]]; then
+                [[ "${COMPREPLY[0]}" == *= ]] || compopt +o nospace
+            fi
+
+            # complete after --flag=abc
+            if [[ $cur == *=* ]]; then
+                if [[ $(type -t compopt) = "builtin" ]]; then
+                    compopt +o nospace
+                fi
+
+                local index flag
+                flag="${cur%%=*}"
+                __%[1]s_index_of_word "${flag}" "${flags_with_completion[@]}"
+                COMPREPLY=()
+                if [[ ${index} -ge 0 ]]; then
+                    PREFIX=""
+                    cur="${cur#*=}"
+                    ${flags_completion[${index}]}
+                    if [ -n "${ZSH_VERSION}" ]; then
+                        # zsh completion needs --flag= prefix
+                        eval "COMPREPLY=( \"\${COMPREPLY[@]/#/${flag}=}\" )"
+                    fi
+                fi
+            fi
+            return 0;
+            ;;
+    esac
+
+    # check if we are handling a flag with special work handling
+    local index
+    __%[1]s_index_of_word "${prev}" "${flags_with_completion[@]}"
+    if [[ ${index} -ge 0 ]]; then
+        ${flags_completion[${index}]}
+        return
+    fi
+
+    # we are parsing a flag and don't have a special handler, no completion
+    if [[ ${cur} != "${words[cword]}" ]]; then
+        return
+    fi
+
+    local completions
+    completions=("${commands[@]}")
+    if [[ ${#must_have_one_noun[@]} -ne 0 ]]; then
+        completions=("${must_have_one_noun[@]}")
+    fi
+    if [[ ${#must_have_one_flag[@]} -ne 0 ]]; then
+        completions+=("${must_have_one_flag[@]}")
+    fi
+    COMPREPLY=( $(compgen -W "${completions[*]}" -- "$cur") )
+
+    if [[ ${#COMPREPLY[@]} -eq 0 && ${#noun_aliases[@]} -gt 0 && ${#must_have_one_noun[@]} -ne 0 ]]; then
+        COMPREPLY=( $(compgen -W "${noun_aliases[*]}" -- "$cur") )
+    fi
+
+    if [[ ${#COMPREPLY[@]} -eq 0 ]]; then
+		if declare -F __%[1]s_custom_func >/dev/null; then
+			# try command name qualified custom func
+			__%[1]s_custom_func
+		else
+			# otherwise fall back to unqualified for compatibility
+			declare -F __custom_func >/dev/null && __custom_func
+		fi
+    fi
+
+    # available in bash-completion >= 2, not always present on macOS
+    if declare -F __ltrim_colon_completions >/dev/null; then
+        __ltrim_colon_completions "$cur"
+    fi
+
+    # If there is only 1 completion and it is a flag with an = it will be completed
+    # but we don't want a space after the =
+    if [[ "${#COMPREPLY[@]}" -eq "1" ]] && [[ $(type -t compopt) = "builtin" ]] && [[ "${COMPREPLY[0]}" == --*= ]]; then
+       compopt -o nospace
+    fi
+}
+
+# The arguments should be in the form "ext1|ext2|extn"
+__%[1]s_handle_filename_extension_flag()
+{
+    local ext="$1"
+    _filedir "@(${ext})"
+}
+
+__%[1]s_handle_subdirs_in_dir_flag()
+{
+    local dir="$1"
+    pushd "${dir}" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1
+}
+
+__%[1]s_handle_flag()
+{
+    __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
+
+    # if a command required a flag, and we found it, unset must_have_one_flag()
+    local flagname=${words[c]}
+    local flagvalue
+    # if the word contained an =
+    if [[ ${words[c]} == *"="* ]]; then
+        flagvalue=${flagname#*=} # take in as flagvalue after the =
+        flagname=${flagname%%=*} # strip everything after the =
+        flagname="${flagname}=" # but put the = back
+    fi
+    __%[1]s_debug "${FUNCNAME[0]}: looking for ${flagname}"
+    if __%[1]s_contains_word "${flagname}" "${must_have_one_flag[@]}"; then
+        must_have_one_flag=()
+    fi
+
+    # if you set a flag which only applies to this command, don't show subcommands
+    if __%[1]s_contains_word "${flagname}" "${local_nonpersistent_flags[@]}"; then
+      commands=()
+    fi
+
+    # keep flag value with flagname as flaghash
+    # flaghash variable is an associative array which is only supported in bash > 3.
+    if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then
+        if [ -n "${flagvalue}" ] ; then
+            flaghash[${flagname}]=${flagvalue}
+        elif [ -n "${words[ $((c+1)) ]}" ] ; then
+            flaghash[${flagname}]=${words[ $((c+1)) ]}
+        else
+            flaghash[${flagname}]="true" # use "true" as the value for a bool flag
+        fi
+    fi
+
+    # skip the argument to a two word flag
+    if [[ ${words[c]} != *"="* ]] && __%[1]s_contains_word "${words[c]}" "${two_word_flags[@]}"; then
+        __%[1]s_debug "${FUNCNAME[0]}: found a flag ${words[c]}, skip the next argument"
+        c=$((c+1))
+        # if we are looking for a flag's value, don't show commands
+        if [[ $c -eq $cword ]]; then
+            commands=()
+        fi
+    fi
+
+    c=$((c+1))
+
+}
+
+__%[1]s_handle_noun()
+{
+    __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
+
+    if __%[1]s_contains_word "${words[c]}" "${must_have_one_noun[@]}"; then
+        must_have_one_noun=()
+    elif __%[1]s_contains_word "${words[c]}" "${noun_aliases[@]}"; then
+        must_have_one_noun=()
+    fi
+
+    nouns+=("${words[c]}")
+    c=$((c+1))
+}
+
+__%[1]s_handle_command()
+{
+    __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
+
+    local next_command
+    if [[ -n ${last_command} ]]; then
+        next_command="_${last_command}_${words[c]//:/__}"
+    else
+        if [[ $c -eq 0 ]]; then
+            next_command="_%[1]s_root_command"
+        else
+            next_command="_${words[c]//:/__}"
+        fi
+    fi
+    c=$((c+1))
+    __%[1]s_debug "${FUNCNAME[0]}: looking for ${next_command}"
+    declare -F "$next_command" >/dev/null && $next_command
+}
+
+__%[1]s_handle_word()
+{
+    if [[ $c -ge $cword ]]; then
+        __%[1]s_handle_reply
+        return
+    fi
+    __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
+    if [[ "${words[c]}" == -* ]]; then
+        __%[1]s_handle_flag
+    elif __%[1]s_contains_word "${words[c]}" "${commands[@]}"; then
+        __%[1]s_handle_command
+    elif [[ $c -eq 0 ]]; then
+        __%[1]s_handle_command
+    elif __%[1]s_contains_word "${words[c]}" "${command_aliases[@]}"; then
+        # aliashash variable is an associative array which is only supported in bash > 3.
+        if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then
+            words[c]=${aliashash[${words[c]}]}
+            __%[1]s_handle_command
+        else
+            __%[1]s_handle_noun
+        fi
+    else
+        __%[1]s_handle_noun
+    fi
+    __%[1]s_handle_word
+}
+
+`, name))
+}
+
+func writePostscript(buf *bytes.Buffer, name string) {
+	name = strings.Replace(name, ":", "__", -1)
+	buf.WriteString(fmt.Sprintf("__start_%s()\n", name))
+	buf.WriteString(fmt.Sprintf(`{
+    local cur prev words cword
+    declare -A flaghash 2>/dev/null || :
+    declare -A aliashash 2>/dev/null || :
+    if declare -F _init_completion >/dev/null 2>&1; then
+        _init_completion -s || return
+    else
+        __%[1]s_init_completion -n "=" || return
+    fi
+
+    local c=0
+    local flags=()
+    local two_word_flags=()
+    local local_nonpersistent_flags=()
+    local flags_with_completion=()
+    local flags_completion=()
+    local commands=("%[1]s")
+    local must_have_one_flag=()
+    local must_have_one_noun=()
+    local last_command
+    local nouns=()
+
+    __%[1]s_handle_word
+}
+
+`, name))
+	buf.WriteString(fmt.Sprintf(`if [[ $(type -t compopt) = "builtin" ]]; then
+    complete -o default -F __start_%s %s
+else
+    complete -o default -o nospace -F __start_%s %s
+fi
+
+`, name, name, name, name))
+	buf.WriteString("# ex: ts=4 sw=4 et filetype=sh\n")
+}
+
+func writeCommands(buf *bytes.Buffer, cmd *Command) {
+	buf.WriteString("    commands=()\n")
+	for _, c := range cmd.Commands() {
+		if !c.IsAvailableCommand() || c == cmd.helpCommand {
+			continue
+		}
+		buf.WriteString(fmt.Sprintf("    commands+=(%q)\n", c.Name()))
+		writeCmdAliases(buf, c)
+	}
+	buf.WriteString("\n")
+}
+
+func writeFlagHandler(buf *bytes.Buffer, name string, annotations map[string][]string, cmd *Command) {
+	for key, value := range annotations {
+		switch key {
+		case BashCompFilenameExt:
+			buf.WriteString(fmt.Sprintf("    flags_with_completion+=(%q)\n", name))
+
+			var ext string
+			if len(value) > 0 {
+				ext = fmt.Sprintf("__%s_handle_filename_extension_flag ", cmd.Root().Name()) + strings.Join(value, "|")
+			} else {
+				ext = "_filedir"
+			}
+			buf.WriteString(fmt.Sprintf("    flags_completion+=(%q)\n", ext))
+		case BashCompCustom:
+			buf.WriteString(fmt.Sprintf("    flags_with_completion+=(%q)\n", name))
+			if len(value) > 0 {
+				handlers := strings.Join(value, "; ")
+				buf.WriteString(fmt.Sprintf("    flags_completion+=(%q)\n", handlers))
+			} else {
+				buf.WriteString("    flags_completion+=(:)\n")
+			}
+		case BashCompSubdirsInDir:
+			buf.WriteString(fmt.Sprintf("    flags_with_completion+=(%q)\n", name))
+
+			var ext string
+			if len(value) == 1 {
+				ext = fmt.Sprintf("__%s_handle_subdirs_in_dir_flag ", cmd.Root().Name()) + value[0]
+			} else {
+				ext = "_filedir -d"
+			}
+			buf.WriteString(fmt.Sprintf("    flags_completion+=(%q)\n", ext))
+		}
+	}
+}
+
+func writeShortFlag(buf *bytes.Buffer, flag *pflag.Flag, cmd *Command) {
+	name := flag.Shorthand
+	format := "    "
+	if len(flag.NoOptDefVal) == 0 {
+		format += "two_word_"
+	}
+	format += "flags+=(\"-%s\")\n"
+	buf.WriteString(fmt.Sprintf(format, name))
+	writeFlagHandler(buf, "-"+name, flag.Annotations, cmd)
+}
+
+func writeFlag(buf *bytes.Buffer, flag *pflag.Flag, cmd *Command) {
+	name := flag.Name
+	format := "    flags+=(\"--%s"
+	if len(flag.NoOptDefVal) == 0 {
+		format += "="
+	}
+	format += "\")\n"
+	buf.WriteString(fmt.Sprintf(format, name))
+	if len(flag.NoOptDefVal) == 0 {
+		format = "    two_word_flags+=(\"--%s\")\n"
+		buf.WriteString(fmt.Sprintf(format, name))
+	}
+	writeFlagHandler(buf, "--"+name, flag.Annotations, cmd)
+}
+
+func writeLocalNonPersistentFlag(buf *bytes.Buffer, flag *pflag.Flag) {
+	name := flag.Name
+	format := "    local_nonpersistent_flags+=(\"--%s"
+	if len(flag.NoOptDefVal) == 0 {
+		format += "="
+	}
+	format += "\")\n"
+	buf.WriteString(fmt.Sprintf(format, name))
+}
+
+func writeFlags(buf *bytes.Buffer, cmd *Command) {
+	buf.WriteString(`    flags=()
+    two_word_flags=()
+    local_nonpersistent_flags=()
+    flags_with_completion=()
+    flags_completion=()
+
+`)
+	localNonPersistentFlags := cmd.LocalNonPersistentFlags()
+	cmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
+		if nonCompletableFlag(flag) {
+			return
+		}
+		writeFlag(buf, flag, cmd)
+		if len(flag.Shorthand) > 0 {
+			writeShortFlag(buf, flag, cmd)
+		}
+		if localNonPersistentFlags.Lookup(flag.Name) != nil {
+			writeLocalNonPersistentFlag(buf, flag)
+		}
+	})
+	cmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) {
+		if nonCompletableFlag(flag) {
+			return
+		}
+		writeFlag(buf, flag, cmd)
+		if len(flag.Shorthand) > 0 {
+			writeShortFlag(buf, flag, cmd)
+		}
+	})
+
+	buf.WriteString("\n")
+}
+
+func writeRequiredFlag(buf *bytes.Buffer, cmd *Command) {
+	buf.WriteString("    must_have_one_flag=()\n")
+	flags := cmd.NonInheritedFlags()
+	flags.VisitAll(func(flag *pflag.Flag) {
+		if nonCompletableFlag(flag) {
+			return
+		}
+		for key := range flag.Annotations {
+			switch key {
+			case BashCompOneRequiredFlag:
+				format := "    must_have_one_flag+=(\"--%s"
+				if flag.Value.Type() != "bool" {
+					format += "="
+				}
+				format += "\")\n"
+				buf.WriteString(fmt.Sprintf(format, flag.Name))
+
+				if len(flag.Shorthand) > 0 {
+					buf.WriteString(fmt.Sprintf("    must_have_one_flag+=(\"-%s\")\n", flag.Shorthand))
+				}
+			}
+		}
+	})
+}
+
+func writeRequiredNouns(buf *bytes.Buffer, cmd *Command) {
+	buf.WriteString("    must_have_one_noun=()\n")
+	sort.Sort(sort.StringSlice(cmd.ValidArgs))
+	for _, value := range cmd.ValidArgs {
+		buf.WriteString(fmt.Sprintf("    must_have_one_noun+=(%q)\n", value))
+	}
+}
+
+func writeCmdAliases(buf *bytes.Buffer, cmd *Command) {
+	if len(cmd.Aliases) == 0 {
+		return
+	}
+
+	sort.Sort(sort.StringSlice(cmd.Aliases))
+
+	buf.WriteString(fmt.Sprint(`    if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then`, "\n"))
+	for _, value := range cmd.Aliases {
+		buf.WriteString(fmt.Sprintf("        command_aliases+=(%q)\n", value))
+		buf.WriteString(fmt.Sprintf("        aliashash[%q]=%q\n", value, cmd.Name()))
+	}
+	buf.WriteString(`    fi`)
+	buf.WriteString("\n")
+}
+func writeArgAliases(buf *bytes.Buffer, cmd *Command) {
+	buf.WriteString("    noun_aliases=()\n")
+	sort.Sort(sort.StringSlice(cmd.ArgAliases))
+	for _, value := range cmd.ArgAliases {
+		buf.WriteString(fmt.Sprintf("    noun_aliases+=(%q)\n", value))
+	}
+}
+
+func gen(buf *bytes.Buffer, cmd *Command) {
+	for _, c := range cmd.Commands() {
+		if !c.IsAvailableCommand() || c == cmd.helpCommand {
+			continue
+		}
+		gen(buf, c)
+	}
+	commandName := cmd.CommandPath()
+	commandName = strings.Replace(commandName, " ", "_", -1)
+	commandName = strings.Replace(commandName, ":", "__", -1)
+
+	if cmd.Root() == cmd {
+		buf.WriteString(fmt.Sprintf("_%s_root_command()\n{\n", commandName))
+	} else {
+		buf.WriteString(fmt.Sprintf("_%s()\n{\n", commandName))
+	}
+
+	buf.WriteString(fmt.Sprintf("    last_command=%q\n", commandName))
+	buf.WriteString("\n")
+	buf.WriteString("    command_aliases=()\n")
+	buf.WriteString("\n")
+
+	writeCommands(buf, cmd)
+	writeFlags(buf, cmd)
+	writeRequiredFlag(buf, cmd)
+	writeRequiredNouns(buf, cmd)
+	writeArgAliases(buf, cmd)
+	buf.WriteString("}\n\n")
+}
+
+// GenBashCompletion generates bash completion file and writes to the passed writer.
+func (c *Command) GenBashCompletion(w io.Writer) error {
+	buf := new(bytes.Buffer)
+	writePreamble(buf, c.Name())
+	if len(c.BashCompletionFunction) > 0 {
+		buf.WriteString(c.BashCompletionFunction + "\n")
+	}
+	gen(buf, c)
+	writePostscript(buf, c.Name())
+
+	_, err := buf.WriteTo(w)
+	return err
+}
+
+func nonCompletableFlag(flag *pflag.Flag) bool {
+	return flag.Hidden || len(flag.Deprecated) > 0
+}
+
+// GenBashCompletionFile generates bash completion file.
+func (c *Command) GenBashCompletionFile(filename string) error {
+	outFile, err := os.Create(filename)
+	if err != nil {
+		return err
+	}
+	defer outFile.Close()
+
+	return c.GenBashCompletion(outFile)
+}
diff --git a/vendor/github.com/spf13/cobra/bash_completions.md b/vendor/github.com/spf13/cobra/bash_completions.md
new file mode 100644
index 00000000..4ac61ee1
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/bash_completions.md
@@ -0,0 +1,256 @@
+# Generating Bash Completions For Your Own cobra.Command
+
+If you are using the generator you can create a completion command by running
+
+```bash
+cobra add completion
+```
+
+Update the help text to show how to install the bash_completion script on Linux; the [Kubectl docs](https://kubernetes.io/docs/tasks/tools/install-kubectl/#enabling-shell-autocompletion) also show options for macOS.
+
+Writing the shell script to stdout allows the most flexible use.
+
+```go
+// completionCmd represents the completion command
+var completionCmd = &cobra.Command{
+	Use:   "completion",
+	Short: "Generates bash completion scripts",
+	Long: `To load completion run
+
+. <(bitbucket completion)
+
+To configure your bash shell to load completions for each session, add the following to your bashrc:
+
+# ~/.bashrc or ~/.profile
+. <(bitbucket completion)
+`,
+	Run: func(cmd *cobra.Command, args []string) {
+		rootCmd.GenBashCompletion(os.Stdout)
+	},
+}
+```
+
+**Note:** The cobra generator may print messages to stdout (for example, when a config file is loaded). Such output will break the autocomplete script, so keep the completion command's stdout limited to the generated script itself.
+
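+One way to keep the generated script clean is to route informational output to stderr. Below is a minimal, self-contained sketch; the `myapp` command, the `configFile` variable, and the message text are illustrative, not part of cobra:
+
+```go
+package main
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/spf13/cobra"
+)
+
+// configFile is a hypothetical config path used only for illustration.
+var configFile = "~/.myapp.yaml"
+
+var rootCmd = &cobra.Command{Use: "myapp"}
+
+var completionCmd = &cobra.Command{
+	Use:   "completion",
+	Short: "Generates bash completion scripts",
+	Run: func(cmd *cobra.Command, args []string) {
+		rootCmd.GenBashCompletion(os.Stdout)
+	},
+}
+
+func main() {
+	cobra.OnInitialize(func() {
+		// Write informational messages to stderr so that stdout carries
+		// only the completion script when `myapp completion` is run.
+		fmt.Fprintln(os.Stderr, "using config file:", configFile)
+	})
+	rootCmd.AddCommand(completionCmd)
+	if err := rootCmd.Execute(); err != nil {
+		os.Exit(1)
+	}
+}
+```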
+
+## Example from kubectl
+
+Generating bash completions from a cobra command is straightforward. An actual program that does so for the Kubernetes kubectl binary is as follows:
+
+```go
+package main
+
+import (
+	"io/ioutil"
+	"os"
+
+	"k8s.io/kubernetes/pkg/kubectl/cmd"
+	"k8s.io/kubernetes/pkg/kubectl/cmd/util"
+)
+
+func main() {
+	kubectl := cmd.NewKubectlCommand(util.NewFactory(nil), os.Stdin, ioutil.Discard, ioutil.Discard)
+	kubectl.GenBashCompletionFile("out.sh")
+}
+```
+
+`out.sh` will get you completions of subcommands and flags. Copy it to `/etc/bash_completion.d/` as described [here](https://debian-administration.org/article/316/An_introduction_to_bash_completion_part_1) and reset your terminal to use autocompletion. If you make additional annotations to your code, you can get even more intelligent and flexible behavior.
+
+## Creating your own custom functions
+
+Here is some actual code from kubectl that defines custom completion functions:
+
+```go
+const (
+        bash_completion_func = `__kubectl_parse_get()
+{
+    local kubectl_output out
+    if kubectl_output=$(kubectl get --no-headers "$1" 2>/dev/null); then
+        out=($(echo "${kubectl_output}" | awk '{print $1}'))
+        COMPREPLY=( $( compgen -W "${out[*]}" -- "$cur" ) )
+    fi
+}
+
+__kubectl_get_resource()
+{
+    if [[ ${#nouns[@]} -eq 0 ]]; then
+        return 1
+    fi
+    __kubectl_parse_get ${nouns[${#nouns[@]} -1]}
+    if [[ $? -eq 0 ]]; then
+        return 0
+    fi
+}
+
+__kubectl_custom_func() {
+    case ${last_command} in
+        kubectl_get | kubectl_describe | kubectl_delete | kubectl_stop)
+            __kubectl_get_resource
+            return
+            ;;
+        *)
+            ;;
+    esac
+}
+`)
+```
+
+And then I set that in my command definition:
+
+```go
+cmds := &cobra.Command{
+	Use:   "kubectl",
+	Short: "kubectl controls the Kubernetes cluster manager",
+	Long: `kubectl controls the Kubernetes cluster manager.
+
+Find more information at https://github.com/GoogleCloudPlatform/kubernetes.`,
+	Run: runHelp,
+	BashCompletionFunction: bash_completion_func,
+}
+```
+
+The `BashCompletionFunction` option is really only valid/useful on the root command. Doing the above will cause `__kubectl_custom_func()` (`__<command-use>_custom_func()`) to be called when the built-in processor is unable to find a solution. In the case of Kubernetes a valid command might look something like `kubectl get pod [mypod]`. If you type `kubectl get pod [tab][tab]`, `__kubectl_custom_func()` will run because the cobra.Command only understood "kubectl" and "get". `__kubectl_custom_func()` will see that the cobra.Command is "kubectl_get" and will thus call another helper, `__kubectl_get_resource()`. `__kubectl_get_resource` will look at the 'nouns' collected. In our example the only noun will be `pod`, so it will call `__kubectl_parse_get pod`. `__kubectl_parse_get` will actually call out to Kubernetes to get any pods, and will then set `COMPREPLY` to the valid pods.
+
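+As a minimal sketch of the same wiring for a hypothetical binary named `myapp` (the command, helper function, and word list below are illustrative, not part of cobra's API):
+
+```go
+package main
+
+import "github.com/spf13/cobra"
+
+// The generated script falls back to __<root-command-name>_custom_func, so for
+// a binary named "myapp" the hook must be called __myapp_custom_func.
+const bashCompletionFunc = `__myapp_custom_func()
+{
+    case ${last_command} in
+        myapp_get)
+            COMPREPLY=( $(compgen -W "alpha beta gamma" -- "$cur") )
+            ;;
+    esac
+}
+`
+
+var rootCmd = &cobra.Command{
+	Use:                    "myapp",
+	BashCompletionFunction: bashCompletionFunc,
+}
+
+var getCmd = &cobra.Command{
+	Use: "get",
+	Run: func(cmd *cobra.Command, args []string) {},
+}
+
+func main() {
+	rootCmd.AddCommand(getCmd)
+	rootCmd.Execute()
+}
+```
+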
+## Have the completions code complete your 'nouns'
+
+In the above example "pod" was assumed to already be typed. But if you want `kubectl get [tab][tab]` to show a list of valid "nouns" you have to set them. Simplified code from `kubectl get` looks like:
+
+```go
+validArgs := []string{"pod", "node", "service", "replicationcontroller"}
+
+cmd := &cobra.Command{
+	Use:     "get [(-o|--output=)json|yaml|template|...] (RESOURCE [NAME] | RESOURCE/NAME ...)",
+	Short:   "Display one or many resources",
+	Long:    get_long,
+	Example: get_example,
+	Run: func(cmd *cobra.Command, args []string) {
+		err := RunGet(f, out, cmd, args)
+		util.CheckErr(err)
+	},
+	ValidArgs: validArgs,
+}
+```
+
+Notice we put the "ValidArgs" on the "get" subcommand. Doing so will give results like
+
+```bash
+# kubectl get [tab][tab]
+node                 pod                    replicationcontroller  service
+```
+
+## Plural form and shortcuts for nouns
+
+If your nouns have a number of aliases, you can define them alongside `ValidArgs` using `ArgAliases`:
+
+```go
+argAliases := []string{"pods", "nodes", "services", "svc", "replicationcontrollers", "rc"}
+
+cmd := &cobra.Command{
+    ...
+	ValidArgs:  validArgs,
+	ArgAliases: argAliases,
+}
+```
+
+The aliases are not shown to the user on tab completion, but they are accepted as valid nouns by
+the completion algorithm if entered manually, e.g. in:
+
+```bash
+# kubectl get rc [tab][tab]
+backend        frontend       database 
+```
+
+Note that without declaring `rc` as an alias, the completion algorithm would show the list of nouns
+in this example again instead of the replication controllers.
+
+## Mark flags as required
+
+Most of the time completions will only show subcommands. But if a flag is required for a subcommand to work, you probably want it to show up when the user types [tab][tab]. Marking a flag as required is easy:
+
+```go
+cmd.MarkFlagRequired("pod")
+cmd.MarkFlagRequired("container")
+```
+
+and you'll get something like
+
+```bash
+# kubectl exec [tab][tab][tab]
+-c            --container=  -p            --pod=  
+```
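+
+For context, here is a minimal, self-contained sketch of how those calls fit into a command definition (the `exec` command, the flag names, and the `myapp` root are illustrative):
+
+```go
+package main
+
+import "github.com/spf13/cobra"
+
+func newExecCmd() *cobra.Command {
+	execCmd := &cobra.Command{
+		Use:   "exec",
+		Short: "Execute a command in a container",
+		Run:   func(cmd *cobra.Command, args []string) {},
+	}
+	execCmd.Flags().StringP("pod", "p", "", "pod name")
+	execCmd.Flags().StringP("container", "c", "", "container name")
+
+	// MarkFlagRequired returns an error if the named flag does not exist,
+	// so it is worth checking in real code.
+	if err := execCmd.MarkFlagRequired("pod"); err != nil {
+		panic(err)
+	}
+	if err := execCmd.MarkFlagRequired("container"); err != nil {
+		panic(err)
+	}
+	return execCmd
+}
+
+func main() {
+	root := &cobra.Command{Use: "myapp"}
+	root.AddCommand(newExecCmd())
+	root.Execute()
+}
+```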
+
+## Specify valid filename extensions for flags that take a filename
+
+In this example we use `--filename=` and expect to get a json or yaml file as the argument. To make this easier, we annotate the `--filename` flag with its valid filename extensions.
+
+```go
+	annotations := []string{"json", "yaml", "yml"}
+	annotation := make(map[string][]string)
+	annotation[cobra.BashCompFilenameExt] = annotations
+
+	flag := &pflag.Flag{
+		Name:        "filename",
+		Shorthand:   "f",
+		Usage:       usage,
+		Value:       value,
+		DefValue:    value.String(),
+		Annotations: annotation,
+	}
+	cmd.Flags().AddFlag(flag)
+```
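+
+Recent cobra releases also provide `Command.MarkFlagFilename`, which sets the same `BashCompFilenameExt` annotation on a flag that is already defined; a brief sketch (assuming `cmd` already exists and defines the flag):
+
+```go
+cmd.Flags().StringP("filename", "f", "", "file to read the resource from")
+if err := cmd.MarkFlagFilename("filename", "json", "yaml", "yml"); err != nil {
+	panic(err)
+}
+```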
+
+Now when you run a command with this filename flag you'll get something like
+
+```bash
+# kubectl create -f 
+test/                         example/                      rpmbuild/
+hello.yml                     test.json
+```
+
+So while there are many other files in the CWD, completion only shows subdirectories and files with the valid extensions.
+
+## Specify custom flag completion
+
+Similar to the filename completion and filtering using cobra.BashCompFilenameExt, you can specify
+a custom flag completion function with cobra.BashCompCustom:
+
+```go
+	annotation := make(map[string][]string)
+	annotation[cobra.BashCompCustom] = []string{"__kubectl_get_namespaces"}
+
+	flag := &pflag.Flag{
+		Name:        "namespace",
+		Usage:       usage,
+		Annotations: annotation,
+	}
+	cmd.Flags().AddFlag(flag)
+```
+
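+Cobra also exposes `Command.MarkFlagCustom` for setting this annotation on an existing flag; a brief sketch (again assuming `cmd` and the `namespace` flag already exist):
+
+```go
+cmd.Flags().String("namespace", "", "namespace to operate in")
+if err := cmd.MarkFlagCustom("namespace", "__kubectl_get_namespaces"); err != nil {
+	panic(err)
+}
+```
+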
+In addition, add the `__kubectl_get_namespaces` implementation in the `BashCompletionFunction`
+value, e.g.:
+
+```bash
+__kubectl_get_namespaces()
+{
+    local template
+    template="{{ range .items  }}{{ .metadata.name }} {{ end }}"
+    local kubectl_out
+    if kubectl_out=$(kubectl get -o template --template="${template}" namespace 2>/dev/null); then
+        COMPREPLY=( $( compgen -W "${kubectl_out}" -- "$cur" ) )
+    fi
+}
+```
+
+## Using bash aliases for commands
+
+You can also configure bash aliases for your commands, and the aliases will support completions as well.
+
+```bash
+alias aliasname=origcommand
+complete -o default -F __start_origcommand aliasname
+
+# and now when you run `aliasname` completion will make
+# suggestions as it did for `origcommand`.
+
+$ aliasname <tab><tab>
+completion     firstcommand   secondcommand
+```
diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go
new file mode 100644
index 00000000..6505c070
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/cobra.go
@@ -0,0 +1,207 @@
+// Copyright © 2013 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Commands similar to git, go tools and other modern CLI tools
+// inspired by go, go-Commander, gh and subcommand
+
+package cobra
+
+import (
+	"fmt"
+	"io"
+	"reflect"
+	"strconv"
+	"strings"
+	"text/template"
+	"time"
+	"unicode"
+)
+
+var templateFuncs = template.FuncMap{
+	"trim":                    strings.TrimSpace,
+	"trimRightSpace":          trimRightSpace,
+	"trimTrailingWhitespaces": trimRightSpace,
+	"appendIfNotPresent":      appendIfNotPresent,
+	"rpad":                    rpad,
+	"gt":                      Gt,
+	"eq":                      Eq,
+}
+
+var initializers []func()
+
+// EnablePrefixMatching allows setting automatic prefix matching. Automatic prefix matching can be a dangerous thing
+// to automatically enable in CLI tools.
+// Set this to true to enable it.
+var EnablePrefixMatching = false
+
+// EnableCommandSorting controls sorting of the slice of commands, which is turned on by default.
+// To disable sorting, set it to false.
+var EnableCommandSorting = true
+
+// MousetrapHelpText enables an information splash screen on Windows
+// if the CLI is started from explorer.exe.
+// To disable the mousetrap, just set this variable to blank string ("").
+// Works only on Microsoft Windows.
+var MousetrapHelpText string = `This is a command line tool.
+
+You need to open cmd.exe and run it from there.
+`
+
+// MousetrapDisplayDuration controls how long the MousetrapHelpText message is displayed on Windows
+// if the CLI is started from explorer.exe. Set to 0 to wait for the return key to be pressed.
+// To disable the mousetrap, just set MousetrapHelpText to blank string ("").
+// Works only on Microsoft Windows.
+var MousetrapDisplayDuration time.Duration = 5 * time.Second
+
+// AddTemplateFunc adds a template function that's available to Usage and Help
+// template generation.
+func AddTemplateFunc(name string, tmplFunc interface{}) {
+	templateFuncs[name] = tmplFunc
+}
+
+// AddTemplateFuncs adds multiple template functions that are available to Usage and
+// Help template generation.
+func AddTemplateFuncs(tmplFuncs template.FuncMap) {
+	for k, v := range tmplFuncs {
+		templateFuncs[k] = v
+	}
+}
+
+// OnInitialize sets the passed functions to be run when each command's
+// Execute method is called.
+func OnInitialize(y ...func()) {
+	initializers = append(initializers, y...)
+}
+
+// FIXME Gt is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
+
+// Gt takes two types and checks whether the first type is greater than the second. In case of types Arrays, Chans,
+// Maps and Slices, Gt will compare their lengths. Ints are compared directly while strings are first parsed as
+// ints and then compared.
+func Gt(a interface{}, b interface{}) bool {
+	var left, right int64
+	av := reflect.ValueOf(a)
+
+	switch av.Kind() {
+	case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+		left = int64(av.Len())
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		left = av.Int()
+	case reflect.String:
+		left, _ = strconv.ParseInt(av.String(), 10, 64)
+	}
+
+	bv := reflect.ValueOf(b)
+
+	switch bv.Kind() {
+	case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+		right = int64(bv.Len())
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		right = bv.Int()
+	case reflect.String:
+		right, _ = strconv.ParseInt(bv.String(), 10, 64)
+	}
+
+	return left > right
+}
+
+// FIXME Eq is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
+
+// Eq takes two types and checks whether they are equal. Supported types are int and string. Unsupported types will panic.
+func Eq(a interface{}, b interface{}) bool {
+	av := reflect.ValueOf(a)
+	bv := reflect.ValueOf(b)
+
+	switch av.Kind() {
+	case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+		panic("Eq called on unsupported type")
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return av.Int() == bv.Int()
+	case reflect.String:
+		return av.String() == bv.String()
+	}
+	return false
+}
+
+func trimRightSpace(s string) string {
+	return strings.TrimRightFunc(s, unicode.IsSpace)
+}
+
+// FIXME appendIfNotPresent is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
+
+// appendIfNotPresent will append stringToAppend to the end of s, but only if it's not yet present in s.
+func appendIfNotPresent(s, stringToAppend string) string {
+	if strings.Contains(s, stringToAppend) {
+		return s
+	}
+	return s + " " + stringToAppend
+}
+
+// rpad adds padding to the right of a string.
+func rpad(s string, padding int) string {
+	template := fmt.Sprintf("%%-%ds", padding)
+	return fmt.Sprintf(template, s)
+}
+
+// tmpl executes the given template text on data, writing the result to w.
+func tmpl(w io.Writer, text string, data interface{}) error {
+	t := template.New("top")
+	t.Funcs(templateFuncs)
+	template.Must(t.Parse(text))
+	return t.Execute(w, data)
+}
+
+// ld compares two strings and returns the levenshtein distance between them.
+func ld(s, t string, ignoreCase bool) int {
+	if ignoreCase {
+		s = strings.ToLower(s)
+		t = strings.ToLower(t)
+	}
+	d := make([][]int, len(s)+1)
+	for i := range d {
+		d[i] = make([]int, len(t)+1)
+	}
+	for i := range d {
+		d[i][0] = i
+	}
+	for j := range d[0] {
+		d[0][j] = j
+	}
+	for j := 1; j <= len(t); j++ {
+		for i := 1; i <= len(s); i++ {
+			if s[i-1] == t[j-1] {
+				d[i][j] = d[i-1][j-1]
+			} else {
+				min := d[i-1][j]
+				if d[i][j-1] < min {
+					min = d[i][j-1]
+				}
+				if d[i-1][j-1] < min {
+					min = d[i-1][j-1]
+				}
+				d[i][j] = min + 1
+			}
+		}
+
+	}
+	return d[len(s)][len(t)]
+}
+
+func stringInSlice(a string, list []string) bool {
+	for _, b := range list {
+		if b == a {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go
new file mode 100644
index 00000000..c7e89830
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/command.go
@@ -0,0 +1,1594 @@
+// Copyright © 2013 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package cobra is a commander providing a simple interface to create powerful modern CLI interfaces.
+// In addition to providing an interface, Cobra simultaneously provides a controller to organize your application code.
+package cobra
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+
+	flag "github.com/spf13/pflag"
+)
+
+// FParseErrWhitelist configures Flag parse errors to be ignored
+type FParseErrWhitelist flag.ParseErrorsWhitelist
+
+// Command is just that, a command for your application.
+// E.g.  'go run ...' - 'run' is the command. Cobra requires
+// you to define the usage and description as part of your command
+// definition to ensure usability.
+type Command struct {
+	// Use is the one-line usage message.
+	Use string
+
+	// Aliases is an array of aliases that can be used instead of the first word in Use.
+	Aliases []string
+
+	// SuggestFor is an array of command names for which this command will be suggested -
+	// similar to aliases but only suggests.
+	SuggestFor []string
+
+	// Short is the short description shown in the 'help' output.
+	Short string
+
+	// Long is the long message shown in the 'help <this-command>' output.
+	Long string
+
+	// Example is examples of how to use the command.
+	Example string
+
+	// ValidArgs is list of all valid non-flag arguments that are accepted in bash completions
+	ValidArgs []string
+
+	// Expected arguments
+	Args PositionalArgs
+
+	// ArgAliases is a list of aliases for ValidArgs.
+	// These are not suggested to the user in the bash completion,
+	// but accepted if entered manually.
+	ArgAliases []string
+
+	// BashCompletionFunction holds custom bash functions used by the bash autocompletion generator.
+	BashCompletionFunction string
+
+	// Deprecated, if non-empty, marks this command as deprecated; the given string is printed when the command is used.
+	Deprecated string
+
+	// Hidden, if true, hides this command so it does NOT show up in the list of available commands.
+	Hidden bool
+
+	// Annotations are key/value pairs that can be used by applications to identify or
+	// group commands.
+	Annotations map[string]string
+
+	// Version defines the version for this command. If this value is non-empty and the command does not
+	// define a "version" flag, a "version" boolean flag will be added to the command and, if specified,
+	// will print content of the "Version" variable.
+	Version string
+
+	// The *Run functions are executed in the following order:
+	//   * PersistentPreRun()
+	//   * PreRun()
+	//   * Run()
+	//   * PostRun()
+	//   * PersistentPostRun()
+	// All functions get the same args, the arguments after the command name.
+	//
+	// PersistentPreRun: children of this command will inherit and execute.
+	PersistentPreRun func(cmd *Command, args []string)
+	// PersistentPreRunE: PersistentPreRun but returns an error.
+	PersistentPreRunE func(cmd *Command, args []string) error
+	// PreRun: children of this command will not inherit.
+	PreRun func(cmd *Command, args []string)
+	// PreRunE: PreRun but returns an error.
+	PreRunE func(cmd *Command, args []string) error
+	// Run: Typically the actual work function. Most commands will only implement this.
+	Run func(cmd *Command, args []string)
+	// RunE: Run but returns an error.
+	RunE func(cmd *Command, args []string) error
+	// PostRun: run after the Run command.
+	PostRun func(cmd *Command, args []string)
+	// PostRunE: PostRun but returns an error.
+	PostRunE func(cmd *Command, args []string) error
+	// PersistentPostRun: children of this command will inherit and execute after PostRun.
+	PersistentPostRun func(cmd *Command, args []string)
+	// PersistentPostRunE: PersistentPostRun but returns an error.
+	PersistentPostRunE func(cmd *Command, args []string) error
+
+	// SilenceErrors is an option to quiet errors down stream.
+	SilenceErrors bool
+
+	// SilenceUsage is an option to silence usage when an error occurs.
+	SilenceUsage bool
+
+	// DisableFlagParsing disables the flag parsing.
+	// If this is true all flags will be passed to the command as arguments.
+	DisableFlagParsing bool
+
+	// DisableAutoGenTag defines, if gen tag ("Auto generated by spf13/cobra...")
+	// will be printed by generating docs for this command.
+	DisableAutoGenTag bool
+
+	// DisableFlagsInUseLine will disable the addition of [flags] to the usage
+	// line of a command when printing help or generating docs
+	DisableFlagsInUseLine bool
+
+	// DisableSuggestions disables the suggestions based on Levenshtein distance
+	// that go along with 'unknown command' messages.
+	DisableSuggestions bool
+	// SuggestionsMinimumDistance defines minimum levenshtein distance to display suggestions.
+	// Must be > 0.
+	SuggestionsMinimumDistance int
+
+	// TraverseChildren parses flags on all parents before executing child command.
+	TraverseChildren bool
+
+	//FParseErrWhitelist flag parse errors to be ignored
+	FParseErrWhitelist FParseErrWhitelist
+
+	// commands is the list of commands supported by this program.
+	commands []*Command
+	// parent is a parent command for this command.
+	parent *Command
+	// Max lengths of commands' string lengths for use in padding.
+	commandsMaxUseLen         int
+	commandsMaxCommandPathLen int
+	commandsMaxNameLen        int
+	// commandsAreSorted indicates whether the commands slice has been sorted.
+	commandsAreSorted bool
+	// commandCalledAs is the name or alias value used to call this command.
+	commandCalledAs struct {
+		name   string
+		called bool
+	}
+
+	// args is actual args parsed from flags.
+	args []string
+	// flagErrorBuf contains all error messages from pflag.
+	flagErrorBuf *bytes.Buffer
+	// flags is full set of flags.
+	flags *flag.FlagSet
+	// pflags contains persistent flags.
+	pflags *flag.FlagSet
+	// lflags contains local flags.
+	lflags *flag.FlagSet
+	// iflags contains inherited flags.
+	iflags *flag.FlagSet
+	// parentsPflags is all persistent flags of cmd's parents.
+	parentsPflags *flag.FlagSet
+	// globNormFunc is the global normalization function
+	// that we can use on every pflag set and children commands
+	globNormFunc func(f *flag.FlagSet, name string) flag.NormalizedName
+
+	// usageFunc is usage func defined by user.
+	usageFunc func(*Command) error
+	// usageTemplate is usage template defined by user.
+	usageTemplate string
+	// flagErrorFunc is func defined by user and it's called when the parsing of
+	// flags returns an error.
+	flagErrorFunc func(*Command, error) error
+	// helpTemplate is help template defined by user.
+	helpTemplate string
+	// helpFunc is help func defined by user.
+	helpFunc func(*Command, []string)
+	// helpCommand is command with usage 'help'. If it's not defined by user,
+	// cobra uses default help command.
+	helpCommand *Command
+	// versionTemplate is the version template defined by user.
+	versionTemplate string
+
+	// inReader is a reader defined by the user that replaces stdin
+	inReader io.Reader
+	// outWriter is a writer defined by the user that replaces stdout
+	outWriter io.Writer
+	// errWriter is a writer defined by the user that replaces stderr
+	errWriter io.Writer
+}
+
+// SetArgs sets arguments for the command. By default the arguments are os.Args[1:]; overriding them is
+// particularly useful when testing.
+func (c *Command) SetArgs(a []string) {
+	c.args = a
+}
+
+// SetOutput sets the destination for usage and error messages.
+// If output is nil, os.Stderr is used.
+// Deprecated: Use SetOut and/or SetErr instead
+func (c *Command) SetOutput(output io.Writer) {
+	c.outWriter = output
+	c.errWriter = output
+}
+
+// SetOut sets the destination for usage messages.
+// If newOut is nil, os.Stdout is used.
+func (c *Command) SetOut(newOut io.Writer) {
+	c.outWriter = newOut
+}
+
+// SetErr sets the destination for error messages.
+// If newErr is nil, os.Stderr is used.
+func (c *Command) SetErr(newErr io.Writer) {
+	c.errWriter = newErr
+}
+
+// SetIn sets the source for input data.
+// If newIn is nil, os.Stdin is used.
+func (c *Command) SetIn(newIn io.Reader) {
+	c.inReader = newIn
+}
+
+// SetUsageFunc sets usage function. Usage can be defined by application.
+func (c *Command) SetUsageFunc(f func(*Command) error) {
+	c.usageFunc = f
+}
+
+// SetUsageTemplate sets usage template. Can be defined by Application.
+func (c *Command) SetUsageTemplate(s string) {
+	c.usageTemplate = s
+}
+
+// SetFlagErrorFunc sets a function to generate an error when flag parsing
+// fails.
+func (c *Command) SetFlagErrorFunc(f func(*Command, error) error) {
+	c.flagErrorFunc = f
+}
+
+// SetHelpFunc sets help function. Can be defined by Application.
+func (c *Command) SetHelpFunc(f func(*Command, []string)) {
+	c.helpFunc = f
+}
+
+// SetHelpCommand sets help command.
+func (c *Command) SetHelpCommand(cmd *Command) {
+	c.helpCommand = cmd
+}
+
+// SetHelpTemplate sets help template to be used. Application can use it to set custom template.
+func (c *Command) SetHelpTemplate(s string) {
+	c.helpTemplate = s
+}
+
+// SetVersionTemplate sets version template to be used. Application can use it to set custom template.
+func (c *Command) SetVersionTemplate(s string) {
+	c.versionTemplate = s
+}
+
+// SetGlobalNormalizationFunc sets a normalization function to all flag sets and also to child commands.
+// The user should not have a cyclic dependency on commands.
+func (c *Command) SetGlobalNormalizationFunc(n func(f *flag.FlagSet, name string) flag.NormalizedName) {
+	c.Flags().SetNormalizeFunc(n)
+	c.PersistentFlags().SetNormalizeFunc(n)
+	c.globNormFunc = n
+
+	for _, command := range c.commands {
+		command.SetGlobalNormalizationFunc(n)
+	}
+}
+
+// OutOrStdout returns output to stdout.
+func (c *Command) OutOrStdout() io.Writer {
+	return c.getOut(os.Stdout)
+}
+
+// OutOrStderr returns output to stderr
+func (c *Command) OutOrStderr() io.Writer {
+	return c.getOut(os.Stderr)
+}
+
+// ErrOrStderr returns output to stderr
+func (c *Command) ErrOrStderr() io.Writer {
+	return c.getErr(os.Stderr)
+}
+
+// InOrStdin returns input from stdin.
+func (c *Command) InOrStdin() io.Reader {
+	return c.getIn(os.Stdin)
+}
+
+func (c *Command) getOut(def io.Writer) io.Writer {
+	if c.outWriter != nil {
+		return c.outWriter
+	}
+	if c.HasParent() {
+		return c.parent.getOut(def)
+	}
+	return def
+}
+
+func (c *Command) getErr(def io.Writer) io.Writer {
+	if c.errWriter != nil {
+		return c.errWriter
+	}
+	if c.HasParent() {
+		return c.parent.getErr(def)
+	}
+	return def
+}
+
+func (c *Command) getIn(def io.Reader) io.Reader {
+	if c.inReader != nil {
+		return c.inReader
+	}
+	if c.HasParent() {
+		return c.parent.getIn(def)
+	}
+	return def
+}
+
+// UsageFunc returns either the function set by SetUsageFunc for this command
+// or a parent, or it returns a default usage function.
+func (c *Command) UsageFunc() (f func(*Command) error) {
+	if c.usageFunc != nil {
+		return c.usageFunc
+	}
+	if c.HasParent() {
+		return c.Parent().UsageFunc()
+	}
+	return func(c *Command) error {
+		c.mergePersistentFlags()
+		err := tmpl(c.OutOrStderr(), c.UsageTemplate(), c)
+		if err != nil {
+			c.Println(err)
+		}
+		return err
+	}
+}
+
+// Usage puts out the usage for the command.
+// Used when a user provides invalid input.
+// Can be defined by user by overriding UsageFunc.
+func (c *Command) Usage() error {
+	return c.UsageFunc()(c)
+}
+
+// HelpFunc returns either the function set by SetHelpFunc for this command
+// or a parent, or it returns a function with default help behavior.
+func (c *Command) HelpFunc() func(*Command, []string) {
+	if c.helpFunc != nil {
+		return c.helpFunc
+	}
+	if c.HasParent() {
+		return c.Parent().HelpFunc()
+	}
+	return func(c *Command, a []string) {
+		c.mergePersistentFlags()
+		err := tmpl(c.OutOrStdout(), c.HelpTemplate(), c)
+		if err != nil {
+			c.Println(err)
+		}
+	}
+}
+
+// Help puts out the help for the command.
+// Used when a user calls help [command].
+// Can be defined by user by overriding HelpFunc.
+func (c *Command) Help() error {
+	c.HelpFunc()(c, []string{})
+	return nil
+}
+
+// UsageString returns usage string.
+func (c *Command) UsageString() string {
+	// Storing normal writers
+	tmpOutput := c.outWriter
+	tmpErr := c.errWriter
+
+	bb := new(bytes.Buffer)
+	c.outWriter = bb
+	c.errWriter = bb
+
+	c.Usage()
+
+	// Setting things back to normal
+	c.outWriter = tmpOutput
+	c.errWriter = tmpErr
+
+	return bb.String()
+}
+
+// FlagErrorFunc returns either the function set by SetFlagErrorFunc for this
+// command or a parent, or it returns a function which returns the original
+// error.
+func (c *Command) FlagErrorFunc() (f func(*Command, error) error) {
+	if c.flagErrorFunc != nil {
+		return c.flagErrorFunc
+	}
+
+	if c.HasParent() {
+		return c.parent.FlagErrorFunc()
+	}
+	return func(c *Command, err error) error {
+		return err
+	}
+}
+
+var minUsagePadding = 25
+
+// UsagePadding returns padding for the usage.
+func (c *Command) UsagePadding() int {
+	if c.parent == nil || minUsagePadding > c.parent.commandsMaxUseLen {
+		return minUsagePadding
+	}
+	return c.parent.commandsMaxUseLen
+}
+
+var minCommandPathPadding = 11
+
+// CommandPathPadding returns padding for the command path.
+func (c *Command) CommandPathPadding() int {
+	if c.parent == nil || minCommandPathPadding > c.parent.commandsMaxCommandPathLen {
+		return minCommandPathPadding
+	}
+	return c.parent.commandsMaxCommandPathLen
+}
+
+var minNamePadding = 11
+
+// NamePadding returns padding for the name.
+func (c *Command) NamePadding() int {
+	if c.parent == nil || minNamePadding > c.parent.commandsMaxNameLen {
+		return minNamePadding
+	}
+	return c.parent.commandsMaxNameLen
+}
+
+// UsageTemplate returns usage template for the command.
+func (c *Command) UsageTemplate() string {
+	if c.usageTemplate != "" {
+		return c.usageTemplate
+	}
+
+	if c.HasParent() {
+		return c.parent.UsageTemplate()
+	}
+	return `Usage:{{if .Runnable}}
+  {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}}
+  {{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}}
+
+Aliases:
+  {{.NameAndAliases}}{{end}}{{if .HasExample}}
+
+Examples:
+{{.Example}}{{end}}{{if .HasAvailableSubCommands}}
+
+Available Commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}}
+  {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}}
+
+Flags:
+{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}}
+
+Global Flags:
+{{.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasHelpSubCommands}}
+
+Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}}
+  {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableSubCommands}}
+
+Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}}
+`
+}
+
+// HelpTemplate returns the help template for the command.
+func (c *Command) HelpTemplate() string {
+	if c.helpTemplate != "" {
+		return c.helpTemplate
+	}
+
+	if c.HasParent() {
+		return c.parent.HelpTemplate()
+	}
+	return `{{with (or .Long .Short)}}{{. | trimTrailingWhitespaces}}
+
+{{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}`
+}
+
+// VersionTemplate returns the version template for the command.
+func (c *Command) VersionTemplate() string {
+	if c.versionTemplate != "" {
+		return c.versionTemplate
+	}
+
+	if c.HasParent() {
+		return c.parent.VersionTemplate()
+	}
+	return `{{with .Name}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}}
+`
+}
+
+func hasNoOptDefVal(name string, fs *flag.FlagSet) bool {
+	flag := fs.Lookup(name)
+	if flag == nil {
+		return false
+	}
+	return flag.NoOptDefVal != ""
+}
+
+func shortHasNoOptDefVal(name string, fs *flag.FlagSet) bool {
+	if len(name) == 0 {
+		return false
+	}
+
+	flag := fs.ShorthandLookup(name[:1])
+	if flag == nil {
+		return false
+	}
+	return flag.NoOptDefVal != ""
+}
+
+func stripFlags(args []string, c *Command) []string {
+	if len(args) == 0 {
+		return args
+	}
+	c.mergePersistentFlags()
+
+	commands := []string{}
+	flags := c.Flags()
+
+Loop:
+	for len(args) > 0 {
+		s := args[0]
+		args = args[1:]
+		switch {
+		case s == "--":
+			// "--" terminates the flags
+			break Loop
+		case strings.HasPrefix(s, "--") && !strings.Contains(s, "=") && !hasNoOptDefVal(s[2:], flags):
+			// If '--flag arg' then
+			// delete arg from args.
+			fallthrough // (do the same as below)
+		case strings.HasPrefix(s, "-") && !strings.Contains(s, "=") && len(s) == 2 && !shortHasNoOptDefVal(s[1:], flags):
+			// If '-f arg' then
+			// delete 'arg' from args or break the loop if len(args) <= 1.
+			if len(args) <= 1 {
+				break Loop
+			} else {
+				args = args[1:]
+				continue
+			}
+		case s != "" && !strings.HasPrefix(s, "-"):
+			commands = append(commands, s)
+		}
+	}
+
+	return commands
+}
+
+// argsMinusFirstX removes only the first x from args.  Otherwise, commands that look like
+// openshift admin policy add-role-to-user admin my-user, lose the admin argument (arg[4]).
+func argsMinusFirstX(args []string, x string) []string {
+	for i, y := range args {
+		if x == y {
+			ret := []string{}
+			ret = append(ret, args[:i]...)
+			ret = append(ret, args[i+1:]...)
+			return ret
+		}
+	}
+	return args
+}
+
+func isFlagArg(arg string) bool {
+	return ((len(arg) >= 3 && arg[1] == '-') ||
+		(len(arg) >= 2 && arg[0] == '-' && arg[1] != '-'))
+}
+
+// Find the target command given the args and command tree
+// Meant to be run on the highest node. Only searches down.
+func (c *Command) Find(args []string) (*Command, []string, error) {
+	var innerfind func(*Command, []string) (*Command, []string)
+
+	innerfind = func(c *Command, innerArgs []string) (*Command, []string) {
+		argsWOflags := stripFlags(innerArgs, c)
+		if len(argsWOflags) == 0 {
+			return c, innerArgs
+		}
+		nextSubCmd := argsWOflags[0]
+
+		cmd := c.findNext(nextSubCmd)
+		if cmd != nil {
+			return innerfind(cmd, argsMinusFirstX(innerArgs, nextSubCmd))
+		}
+		return c, innerArgs
+	}
+
+	commandFound, a := innerfind(c, args)
+	if commandFound.Args == nil {
+		return commandFound, a, legacyArgs(commandFound, stripFlags(a, commandFound))
+	}
+	return commandFound, a, nil
+}
+
+func (c *Command) findSuggestions(arg string) string {
+	if c.DisableSuggestions {
+		return ""
+	}
+	if c.SuggestionsMinimumDistance <= 0 {
+		c.SuggestionsMinimumDistance = 2
+	}
+	suggestionsString := ""
+	if suggestions := c.SuggestionsFor(arg); len(suggestions) > 0 {
+		suggestionsString += "\n\nDid you mean this?\n"
+		for _, s := range suggestions {
+			suggestionsString += fmt.Sprintf("\t%v\n", s)
+		}
+	}
+	return suggestionsString
+}
+
+func (c *Command) findNext(next string) *Command {
+	matches := make([]*Command, 0)
+	for _, cmd := range c.commands {
+		if cmd.Name() == next || cmd.HasAlias(next) {
+			cmd.commandCalledAs.name = next
+			return cmd
+		}
+		if EnablePrefixMatching && cmd.hasNameOrAliasPrefix(next) {
+			matches = append(matches, cmd)
+		}
+	}
+
+	if len(matches) == 1 {
+		return matches[0]
+	}
+
+	return nil
+}
+
+// Traverse the command tree to find the command, and parse args for
+// each parent.
+func (c *Command) Traverse(args []string) (*Command, []string, error) {
+	flags := []string{}
+	inFlag := false
+
+	for i, arg := range args {
+		switch {
+		// A long flag with a space separated value
+		case strings.HasPrefix(arg, "--") && !strings.Contains(arg, "="):
+			// TODO: this isn't quite right, we should really check ahead for 'true' or 'false'
+			inFlag = !hasNoOptDefVal(arg[2:], c.Flags())
+			flags = append(flags, arg)
+			continue
+		// A short flag with a space separated value
+		case strings.HasPrefix(arg, "-") && !strings.Contains(arg, "=") && len(arg) == 2 && !shortHasNoOptDefVal(arg[1:], c.Flags()):
+			inFlag = true
+			flags = append(flags, arg)
+			continue
+		// The value for a flag
+		case inFlag:
+			inFlag = false
+			flags = append(flags, arg)
+			continue
+		// A flag without a value, or with an `=` separated value
+		case isFlagArg(arg):
+			flags = append(flags, arg)
+			continue
+		}
+
+		cmd := c.findNext(arg)
+		if cmd == nil {
+			return c, args, nil
+		}
+
+		if err := c.ParseFlags(flags); err != nil {
+			return nil, args, err
+		}
+		return cmd.Traverse(args[i+1:])
+	}
+	return c, args, nil
+}
+
+// SuggestionsFor provides suggestions for the typedName.
+func (c *Command) SuggestionsFor(typedName string) []string {
+	suggestions := []string{}
+	for _, cmd := range c.commands {
+		if cmd.IsAvailableCommand() {
+			levenshteinDistance := ld(typedName, cmd.Name(), true)
+			suggestByLevenshtein := levenshteinDistance <= c.SuggestionsMinimumDistance
+			suggestByPrefix := strings.HasPrefix(strings.ToLower(cmd.Name()), strings.ToLower(typedName))
+			if suggestByLevenshtein || suggestByPrefix {
+				suggestions = append(suggestions, cmd.Name())
+			}
+			for _, explicitSuggestion := range cmd.SuggestFor {
+				if strings.EqualFold(typedName, explicitSuggestion) {
+					suggestions = append(suggestions, cmd.Name())
+				}
+			}
+		}
+	}
+	return suggestions
+}
+
+// VisitParents visits all parents of the command and invokes fn on each parent.
+func (c *Command) VisitParents(fn func(*Command)) {
+	if c.HasParent() {
+		fn(c.Parent())
+		c.Parent().VisitParents(fn)
+	}
+}
+
+// Root finds root command.
+func (c *Command) Root() *Command {
+	if c.HasParent() {
+		return c.Parent().Root()
+	}
+	return c
+}
+
+// ArgsLenAtDash will return the length of c.Flags().Args at the moment
+// when a -- was found during args parsing.
+func (c *Command) ArgsLenAtDash() int {
+	return c.Flags().ArgsLenAtDash()
+}
+
+func (c *Command) execute(a []string) (err error) {
+	if c == nil {
+		return fmt.Errorf("Called Execute() on a nil Command")
+	}
+
+	if len(c.Deprecated) > 0 {
+		c.Printf("Command %q is deprecated, %s\n", c.Name(), c.Deprecated)
+	}
+
+	// initialize help and version flag at the last point possible to allow for user
+	// overriding
+	c.InitDefaultHelpFlag()
+	c.InitDefaultVersionFlag()
+
+	err = c.ParseFlags(a)
+	if err != nil {
+		return c.FlagErrorFunc()(c, err)
+	}
+
+	// If help is called, regardless of other flags, return we want help.
+	// Also say we need help if the command isn't runnable.
+	helpVal, err := c.Flags().GetBool("help")
+	if err != nil {
+		// should be impossible to get here as we always declare a help
+		// flag in InitDefaultHelpFlag()
+		c.Println("\"help\" flag declared as non-bool. Please correct your code")
+		return err
+	}
+
+	if helpVal {
+		return flag.ErrHelp
+	}
+
+	// for back-compat, only add version flag behavior if version is defined
+	if c.Version != "" {
+		versionVal, err := c.Flags().GetBool("version")
+		if err != nil {
+			c.Println("\"version\" flag declared as non-bool. Please correct your code")
+			return err
+		}
+		if versionVal {
+			err := tmpl(c.OutOrStdout(), c.VersionTemplate(), c)
+			if err != nil {
+				c.Println(err)
+			}
+			return err
+		}
+	}
+
+	if !c.Runnable() {
+		return flag.ErrHelp
+	}
+
+	c.preRun()
+
+	argWoFlags := c.Flags().Args()
+	if c.DisableFlagParsing {
+		argWoFlags = a
+	}
+
+	if err := c.ValidateArgs(argWoFlags); err != nil {
+		return err
+	}
+
+	for p := c; p != nil; p = p.Parent() {
+		if p.PersistentPreRunE != nil {
+			if err := p.PersistentPreRunE(c, argWoFlags); err != nil {
+				return err
+			}
+			break
+		} else if p.PersistentPreRun != nil {
+			p.PersistentPreRun(c, argWoFlags)
+			break
+		}
+	}
+	if c.PreRunE != nil {
+		if err := c.PreRunE(c, argWoFlags); err != nil {
+			return err
+		}
+	} else if c.PreRun != nil {
+		c.PreRun(c, argWoFlags)
+	}
+
+	if err := c.validateRequiredFlags(); err != nil {
+		return err
+	}
+	if c.RunE != nil {
+		if err := c.RunE(c, argWoFlags); err != nil {
+			return err
+		}
+	} else {
+		c.Run(c, argWoFlags)
+	}
+	if c.PostRunE != nil {
+		if err := c.PostRunE(c, argWoFlags); err != nil {
+			return err
+		}
+	} else if c.PostRun != nil {
+		c.PostRun(c, argWoFlags)
+	}
+	for p := c; p != nil; p = p.Parent() {
+		if p.PersistentPostRunE != nil {
+			if err := p.PersistentPostRunE(c, argWoFlags); err != nil {
+				return err
+			}
+			break
+		} else if p.PersistentPostRun != nil {
+			p.PersistentPostRun(c, argWoFlags)
+			break
+		}
+	}
+
+	return nil
+}
+
+func (c *Command) preRun() {
+	for _, x := range initializers {
+		x()
+	}
+}
+
+// Execute uses the args (os.Args[1:] by default)
+// and runs through the command tree finding appropriate matches
+// for commands and then corresponding flags.
+func (c *Command) Execute() error {
+	_, err := c.ExecuteC()
+	return err
+}
+
+// ExecuteC executes the command.
+func (c *Command) ExecuteC() (cmd *Command, err error) {
+	// Regardless of what command execute is called on, run on Root only
+	if c.HasParent() {
+		return c.Root().ExecuteC()
+	}
+
+	// windows hook
+	if preExecHookFn != nil {
+		preExecHookFn(c)
+	}
+
+	// initialize help as the last point possible to allow for user
+	// overriding
+	c.InitDefaultHelpCmd()
+
+	args := c.args
+
+	// Workaround FAIL with "go test -v" or "cobra.test -test.v", see #155
+	if c.args == nil && filepath.Base(os.Args[0]) != "cobra.test" {
+		args = os.Args[1:]
+	}
+
+	var flags []string
+	if c.TraverseChildren {
+		cmd, flags, err = c.Traverse(args)
+	} else {
+		cmd, flags, err = c.Find(args)
+	}
+	if err != nil {
+		// If found parse to a subcommand and then failed, talk about the subcommand
+		if cmd != nil {
+			c = cmd
+		}
+		if !c.SilenceErrors {
+			c.Println("Error:", err.Error())
+			c.Printf("Run '%v --help' for usage.\n", c.CommandPath())
+		}
+		return c, err
+	}
+
+	cmd.commandCalledAs.called = true
+	if cmd.commandCalledAs.name == "" {
+		cmd.commandCalledAs.name = cmd.Name()
+	}
+
+	err = cmd.execute(flags)
+	if err != nil {
+		// Always show help if requested, even if SilenceErrors is in
+		// effect
+		if err == flag.ErrHelp {
+			cmd.HelpFunc()(cmd, args)
+			return cmd, nil
+		}
+
+		// If root command has SilenceErrors flagged,
+		// all subcommands should respect it
+		if !cmd.SilenceErrors && !c.SilenceErrors {
+			c.Println("Error:", err.Error())
+		}
+
+		// If root command has SilenceUsage flagged,
+		// all subcommands should respect it
+		if !cmd.SilenceUsage && !c.SilenceUsage {
+			c.Println(cmd.UsageString())
+		}
+	}
+	return cmd, err
+}
+
+func (c *Command) ValidateArgs(args []string) error {
+	if c.Args == nil {
+		return nil
+	}
+	return c.Args(c, args)
+}
+
+func (c *Command) validateRequiredFlags() error {
+	flags := c.Flags()
+	missingFlagNames := []string{}
+	flags.VisitAll(func(pflag *flag.Flag) {
+		requiredAnnotation, found := pflag.Annotations[BashCompOneRequiredFlag]
+		if !found {
+			return
+		}
+		if (requiredAnnotation[0] == "true") && !pflag.Changed {
+			missingFlagNames = append(missingFlagNames, pflag.Name)
+		}
+	})
+
+	if len(missingFlagNames) > 0 {
+		return fmt.Errorf(`required flag(s) "%s" not set`, strings.Join(missingFlagNames, `", "`))
+	}
+	return nil
+}
+
+// InitDefaultHelpFlag adds default help flag to c.
+// It is called automatically by executing the c or by calling help and usage.
+// If c already has help flag, it will do nothing.
+func (c *Command) InitDefaultHelpFlag() {
+	c.mergePersistentFlags()
+	if c.Flags().Lookup("help") == nil {
+		usage := "help for "
+		if c.Name() == "" {
+			usage += "this command"
+		} else {
+			usage += c.Name()
+		}
+		c.Flags().BoolP("help", "h", false, usage)
+	}
+}
+
+// InitDefaultVersionFlag adds default version flag to c.
+// It is called automatically by executing the c.
+// If c already has a version flag, it will do nothing.
+// If c.Version is empty, it will do nothing.
+func (c *Command) InitDefaultVersionFlag() {
+	if c.Version == "" {
+		return
+	}
+
+	c.mergePersistentFlags()
+	if c.Flags().Lookup("version") == nil {
+		usage := "version for "
+		if c.Name() == "" {
+			usage += "this command"
+		} else {
+			usage += c.Name()
+		}
+		c.Flags().Bool("version", false, usage)
+	}
+}
+
+// InitDefaultHelpCmd adds default help command to c.
+// It is called automatically by executing the c or by calling help and usage.
+// If c already has help command or c has no subcommands, it will do nothing.
+func (c *Command) InitDefaultHelpCmd() {
+	if !c.HasSubCommands() {
+		return
+	}
+
+	if c.helpCommand == nil {
+		c.helpCommand = &Command{
+			Use:   "help [command]",
+			Short: "Help about any command",
+			Long: `Help provides help for any command in the application.
+Simply type ` + c.Name() + ` help [path to command] for full details.`,
+
+			Run: func(c *Command, args []string) {
+				cmd, _, e := c.Root().Find(args)
+				if cmd == nil || e != nil {
+					c.Printf("Unknown help topic %#q\n", args)
+					c.Root().Usage()
+				} else {
+					cmd.InitDefaultHelpFlag() // make possible 'help' flag to be shown
+					cmd.Help()
+				}
+			},
+		}
+	}
+	c.RemoveCommand(c.helpCommand)
+	c.AddCommand(c.helpCommand)
+}
+
+// ResetCommands deletes the parent, subcommands, and help command from c.
+func (c *Command) ResetCommands() {
+	c.parent = nil
+	c.commands = nil
+	c.helpCommand = nil
+	c.parentsPflags = nil
+}
+
+// Sorts commands by their names.
+type commandSorterByName []*Command
+
+func (c commandSorterByName) Len() int           { return len(c) }
+func (c commandSorterByName) Swap(i, j int)      { c[i], c[j] = c[j], c[i] }
+func (c commandSorterByName) Less(i, j int) bool { return c[i].Name() < c[j].Name() }
+
+// Commands returns a sorted slice of child commands.
+func (c *Command) Commands() []*Command {
+	// do not sort commands if it already sorted or sorting was disabled
+	if EnableCommandSorting && !c.commandsAreSorted {
+		sort.Sort(commandSorterByName(c.commands))
+		c.commandsAreSorted = true
+	}
+	return c.commands
+}
+
+// AddCommand adds one or more commands to this parent command.
+func (c *Command) AddCommand(cmds ...*Command) {
+	for i, x := range cmds {
+		if cmds[i] == c {
+			panic("Command can't be a child of itself")
+		}
+		cmds[i].parent = c
+		// update max lengths
+		usageLen := len(x.Use)
+		if usageLen > c.commandsMaxUseLen {
+			c.commandsMaxUseLen = usageLen
+		}
+		commandPathLen := len(x.CommandPath())
+		if commandPathLen > c.commandsMaxCommandPathLen {
+			c.commandsMaxCommandPathLen = commandPathLen
+		}
+		nameLen := len(x.Name())
+		if nameLen > c.commandsMaxNameLen {
+			c.commandsMaxNameLen = nameLen
+		}
+		// If global normalization function exists, update all children
+		if c.globNormFunc != nil {
+			x.SetGlobalNormalizationFunc(c.globNormFunc)
+		}
+		c.commands = append(c.commands, x)
+		c.commandsAreSorted = false
+	}
+}
+
+// RemoveCommand removes one or more commands from a parent command.
+func (c *Command) RemoveCommand(cmds ...*Command) {
+	commands := []*Command{}
+main:
+	for _, command := range c.commands {
+		for _, cmd := range cmds {
+			if command == cmd {
+				command.parent = nil
+				continue main
+			}
+		}
+		commands = append(commands, command)
+	}
+	c.commands = commands
+	// recompute all lengths
+	c.commandsMaxUseLen = 0
+	c.commandsMaxCommandPathLen = 0
+	c.commandsMaxNameLen = 0
+	for _, command := range c.commands {
+		usageLen := len(command.Use)
+		if usageLen > c.commandsMaxUseLen {
+			c.commandsMaxUseLen = usageLen
+		}
+		commandPathLen := len(command.CommandPath())
+		if commandPathLen > c.commandsMaxCommandPathLen {
+			c.commandsMaxCommandPathLen = commandPathLen
+		}
+		nameLen := len(command.Name())
+		if nameLen > c.commandsMaxNameLen {
+			c.commandsMaxNameLen = nameLen
+		}
+	}
+}
+
+// Print is a convenience method to Print to the defined output, fallback to Stderr if not set.
+func (c *Command) Print(i ...interface{}) {
+	fmt.Fprint(c.OutOrStderr(), i...)
+}
+
+// Println is a convenience method to Println to the defined output, fallback to Stderr if not set.
+func (c *Command) Println(i ...interface{}) {
+	c.Print(fmt.Sprintln(i...))
+}
+
+// Printf is a convenience method to Printf to the defined output, fallback to Stderr if not set.
+func (c *Command) Printf(format string, i ...interface{}) {
+	c.Print(fmt.Sprintf(format, i...))
+}
+
+// PrintErr is a convenience method to Print to the defined Err output, fallback to Stderr if not set.
+func (c *Command) PrintErr(i ...interface{}) {
+	fmt.Fprint(c.ErrOrStderr(), i...)
+}
+
+// PrintErrln is a convenience method to Println to the defined Err output, fallback to Stderr if not set.
+func (c *Command) PrintErrln(i ...interface{}) {
+	c.PrintErr(fmt.Sprintln(i...))
+}
+
+// PrintErrf is a convenience method to Printf to the defined Err output, fallback to Stderr if not set.
+func (c *Command) PrintErrf(format string, i ...interface{}) {
+	c.PrintErr(fmt.Sprintf(format, i...))
+}
+
+// CommandPath returns the full path to this command.
+func (c *Command) CommandPath() string {
+	if c.HasParent() {
+		return c.Parent().CommandPath() + " " + c.Name()
+	}
+	return c.Name()
+}
+
+// UseLine returns the full usage line for a given command (including parents).
+func (c *Command) UseLine() string {
+	var useline string
+	if c.HasParent() {
+		useline = c.parent.CommandPath() + " " + c.Use
+	} else {
+		useline = c.Use
+	}
+	if c.DisableFlagsInUseLine {
+		return useline
+	}
+	if c.HasAvailableFlags() && !strings.Contains(useline, "[flags]") {
+		useline += " [flags]"
+	}
+	return useline
+}
+
+// DebugFlags is used to determine which flags have been assigned to which commands
+// and which persist.
+func (c *Command) DebugFlags() {
+	c.Println("DebugFlags called on", c.Name())
+	var debugflags func(*Command)
+
+	debugflags = func(x *Command) {
+		if x.HasFlags() || x.HasPersistentFlags() {
+			c.Println(x.Name())
+		}
+		if x.HasFlags() {
+			x.flags.VisitAll(func(f *flag.Flag) {
+				if x.HasPersistentFlags() && x.persistentFlag(f.Name) != nil {
+					c.Println("  -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, "  [LP]")
+				} else {
+					c.Println("  -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, "  [L]")
+				}
+			})
+		}
+		if x.HasPersistentFlags() {
+			x.pflags.VisitAll(func(f *flag.Flag) {
+				if x.HasFlags() {
+					if x.flags.Lookup(f.Name) == nil {
+						c.Println("  -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, "  [P]")
+					}
+				} else {
+					c.Println("  -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, "  [P]")
+				}
+			})
+		}
+		c.Println(x.flagErrorBuf)
+		if x.HasSubCommands() {
+			for _, y := range x.commands {
+				debugflags(y)
+			}
+		}
+	}
+
+	debugflags(c)
+}
+
+// Name returns the command's name: the first word in the use line.
+func (c *Command) Name() string {
+	name := c.Use
+	i := strings.Index(name, " ")
+	if i >= 0 {
+		name = name[:i]
+	}
+	return name
+}
+
+// HasAlias determines if a given string is an alias of the command.
+func (c *Command) HasAlias(s string) bool {
+	for _, a := range c.Aliases {
+		if a == s {
+			return true
+		}
+	}
+	return false
+}
+
+// CalledAs returns the command name or alias that was used to invoke
+// this command or an empty string if the command has not been called.
+func (c *Command) CalledAs() string {
+	if c.commandCalledAs.called {
+		return c.commandCalledAs.name
+	}
+	return ""
+}
+
+// hasNameOrAliasPrefix returns true if the Name or any of the aliases starts
+// with prefix.
+func (c *Command) hasNameOrAliasPrefix(prefix string) bool {
+	if strings.HasPrefix(c.Name(), prefix) {
+		c.commandCalledAs.name = c.Name()
+		return true
+	}
+	for _, alias := range c.Aliases {
+		if strings.HasPrefix(alias, prefix) {
+			c.commandCalledAs.name = alias
+			return true
+		}
+	}
+	return false
+}
+
+// NameAndAliases returns a comma-separated list of the command name and all of its aliases.
+func (c *Command) NameAndAliases() string {
+	return strings.Join(append([]string{c.Name()}, c.Aliases...), ", ")
+}
+
+// HasExample determines if the command has an example.
+func (c *Command) HasExample() bool {
+	return len(c.Example) > 0
+}
+
+// Runnable determines if the command is itself runnable.
+func (c *Command) Runnable() bool {
+	return c.Run != nil || c.RunE != nil
+}
+
+// HasSubCommands determines if the command has children commands.
+func (c *Command) HasSubCommands() bool {
+	return len(c.commands) > 0
+}
+
+// IsAvailableCommand determines if a command is available as a non-help command
+// (this includes all non deprecated/hidden commands).
+func (c *Command) IsAvailableCommand() bool {
+	if len(c.Deprecated) != 0 || c.Hidden {
+		return false
+	}
+
+	if c.HasParent() && c.Parent().helpCommand == c {
+		return false
+	}
+
+	if c.Runnable() || c.HasAvailableSubCommands() {
+		return true
+	}
+
+	return false
+}
+
+// IsAdditionalHelpTopicCommand determines if a command is an additional
+// help topic command; additional help topic command is determined by the
+// fact that it is NOT runnable/hidden/deprecated, and has no sub commands that
+// are runnable/hidden/deprecated.
+// Concrete example: https://github.com/spf13/cobra/issues/393#issuecomment-282741924.
+func (c *Command) IsAdditionalHelpTopicCommand() bool {
+	// if a command is runnable, deprecated, or hidden it is not a 'help' command
+	if c.Runnable() || len(c.Deprecated) != 0 || c.Hidden {
+		return false
+	}
+
+	// if any non-help sub commands are found, the command is not a 'help' command
+	for _, sub := range c.commands {
+		if !sub.IsAdditionalHelpTopicCommand() {
+			return false
+		}
+	}
+
+	// the command either has no sub commands, or no non-help sub commands
+	return true
+}
+
+// HasHelpSubCommands determines if a command has any available 'help' sub commands
+// that need to be shown in the usage/help default template under 'additional help
+// topics'.
+func (c *Command) HasHelpSubCommands() bool {
+	// return true on the first found available 'help' sub command
+	for _, sub := range c.commands {
+		if sub.IsAdditionalHelpTopicCommand() {
+			return true
+		}
+	}
+
+	// the command either has no sub commands, or no available 'help' sub commands
+	return false
+}
+
+// HasAvailableSubCommands determines if a command has available sub commands that
+// need to be shown in the usage/help default template under 'available commands'.
+func (c *Command) HasAvailableSubCommands() bool {
+	// return true on the first found available (non deprecated/help/hidden)
+	// sub command
+	for _, sub := range c.commands {
+		if sub.IsAvailableCommand() {
+			return true
+		}
+	}
+
+	// the command either has no sub commands, or no available (non deprecated/help/hidden)
+	// sub commands
+	return false
+}
+
+// HasParent determines if the command is a child command.
+func (c *Command) HasParent() bool {
+	return c.parent != nil
+}
+
+// GlobalNormalizationFunc returns the global normalization function or nil if it doesn't exist.
+func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) flag.NormalizedName {
+	return c.globNormFunc
+}
+
+// Flags returns the complete FlagSet that applies
+// to this command (local and persistent declared here and by all parents).
+func (c *Command) Flags() *flag.FlagSet {
+	if c.flags == nil {
+		c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+		if c.flagErrorBuf == nil {
+			c.flagErrorBuf = new(bytes.Buffer)
+		}
+		c.flags.SetOutput(c.flagErrorBuf)
+	}
+
+	return c.flags
+}
+
+// LocalNonPersistentFlags are flags specific to this command which will NOT persist to subcommands.
+func (c *Command) LocalNonPersistentFlags() *flag.FlagSet {
+	persistentFlags := c.PersistentFlags()
+
+	out := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+	c.LocalFlags().VisitAll(func(f *flag.Flag) {
+		if persistentFlags.Lookup(f.Name) == nil {
+			out.AddFlag(f)
+		}
+	})
+	return out
+}
+
+// LocalFlags returns the local FlagSet specifically set in the current command.
+func (c *Command) LocalFlags() *flag.FlagSet {
+	c.mergePersistentFlags()
+
+	if c.lflags == nil {
+		c.lflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+		if c.flagErrorBuf == nil {
+			c.flagErrorBuf = new(bytes.Buffer)
+		}
+		c.lflags.SetOutput(c.flagErrorBuf)
+	}
+	c.lflags.SortFlags = c.Flags().SortFlags
+	if c.globNormFunc != nil {
+		c.lflags.SetNormalizeFunc(c.globNormFunc)
+	}
+
+	addToLocal := func(f *flag.Flag) {
+		if c.lflags.Lookup(f.Name) == nil && c.parentsPflags.Lookup(f.Name) == nil {
+			c.lflags.AddFlag(f)
+		}
+	}
+	c.Flags().VisitAll(addToLocal)
+	c.PersistentFlags().VisitAll(addToLocal)
+	return c.lflags
+}
+
+// InheritedFlags returns all flags which were inherited from parent commands.
+func (c *Command) InheritedFlags() *flag.FlagSet {
+	c.mergePersistentFlags()
+
+	if c.iflags == nil {
+		c.iflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+		if c.flagErrorBuf == nil {
+			c.flagErrorBuf = new(bytes.Buffer)
+		}
+		c.iflags.SetOutput(c.flagErrorBuf)
+	}
+
+	local := c.LocalFlags()
+	if c.globNormFunc != nil {
+		c.iflags.SetNormalizeFunc(c.globNormFunc)
+	}
+
+	c.parentsPflags.VisitAll(func(f *flag.Flag) {
+		if c.iflags.Lookup(f.Name) == nil && local.Lookup(f.Name) == nil {
+			c.iflags.AddFlag(f)
+		}
+	})
+	return c.iflags
+}
+
+// NonInheritedFlags returns all flags which were not inherited from parent commands.
+func (c *Command) NonInheritedFlags() *flag.FlagSet {
+	return c.LocalFlags()
+}
+
+// PersistentFlags returns the persistent FlagSet specifically set in the current command.
+func (c *Command) PersistentFlags() *flag.FlagSet {
+	if c.pflags == nil {
+		c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+		if c.flagErrorBuf == nil {
+			c.flagErrorBuf = new(bytes.Buffer)
+		}
+		c.pflags.SetOutput(c.flagErrorBuf)
+	}
+	return c.pflags
+}
+
+// ResetFlags deletes all flags from command.
+func (c *Command) ResetFlags() {
+	c.flagErrorBuf = new(bytes.Buffer)
+	c.flagErrorBuf.Reset()
+	c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+	c.flags.SetOutput(c.flagErrorBuf)
+	c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+	c.pflags.SetOutput(c.flagErrorBuf)
+
+	c.lflags = nil
+	c.iflags = nil
+	c.parentsPflags = nil
+}
+
+// HasFlags checks if the command contains any flags (local plus persistent from the entire structure).
+func (c *Command) HasFlags() bool {
+	return c.Flags().HasFlags()
+}
+
+// HasPersistentFlags checks if the command contains persistent flags.
+func (c *Command) HasPersistentFlags() bool {
+	return c.PersistentFlags().HasFlags()
+}
+
+// HasLocalFlags checks if the command has flags specifically declared locally.
+func (c *Command) HasLocalFlags() bool {
+	return c.LocalFlags().HasFlags()
+}
+
+// HasInheritedFlags checks if the command has flags inherited from its parent command.
+func (c *Command) HasInheritedFlags() bool {
+	return c.InheritedFlags().HasFlags()
+}
+
+// HasAvailableFlags checks if the command contains any flags (local plus persistent from the entire
+// structure) which are not hidden or deprecated.
+func (c *Command) HasAvailableFlags() bool {
+	return c.Flags().HasAvailableFlags()
+}
+
+// HasAvailablePersistentFlags checks if the command contains persistent flags which are not hidden or deprecated.
+func (c *Command) HasAvailablePersistentFlags() bool {
+	return c.PersistentFlags().HasAvailableFlags()
+}
+
+// HasAvailableLocalFlags checks if the command has flags specifically declared locally which are not hidden
+// or deprecated.
+func (c *Command) HasAvailableLocalFlags() bool {
+	return c.LocalFlags().HasAvailableFlags()
+}
+
+// HasAvailableInheritedFlags checks if the command has flags inherited from its parent command which are
+// not hidden or deprecated.
+func (c *Command) HasAvailableInheritedFlags() bool {
+	return c.InheritedFlags().HasAvailableFlags()
+}
+
+// Flag climbs up the command tree looking for matching flag.
+func (c *Command) Flag(name string) (flag *flag.Flag) {
+	flag = c.Flags().Lookup(name)
+
+	if flag == nil {
+		flag = c.persistentFlag(name)
+	}
+
+	return
+}
+
+// Recursively find matching persistent flag.
+func (c *Command) persistentFlag(name string) (flag *flag.Flag) {
+	if c.HasPersistentFlags() {
+		flag = c.PersistentFlags().Lookup(name)
+	}
+
+	if flag == nil {
+		c.updateParentsPflags()
+		flag = c.parentsPflags.Lookup(name)
+	}
+	return
+}
+
+// ParseFlags parses persistent flag tree and local flags.
+func (c *Command) ParseFlags(args []string) error {
+	if c.DisableFlagParsing {
+		return nil
+	}
+
+	if c.flagErrorBuf == nil {
+		c.flagErrorBuf = new(bytes.Buffer)
+	}
+	beforeErrorBufLen := c.flagErrorBuf.Len()
+	c.mergePersistentFlags()
+
+	// do this here, after merging all flags and just before parsing
+	c.Flags().ParseErrorsWhitelist = flag.ParseErrorsWhitelist(c.FParseErrWhitelist)
+
+	err := c.Flags().Parse(args)
+	// Print warnings if they occurred (e.g. deprecated flag messages).
+	if c.flagErrorBuf.Len()-beforeErrorBufLen > 0 && err == nil {
+		c.Print(c.flagErrorBuf.String())
+	}
+
+	return err
+}
+
+// Parent returns a command's parent command.
+func (c *Command) Parent() *Command {
+	return c.parent
+}
+
+// mergePersistentFlags merges c.PersistentFlags() to c.Flags()
+// and adds missing persistent flags of all parents.
+func (c *Command) mergePersistentFlags() {
+	c.updateParentsPflags()
+	c.Flags().AddFlagSet(c.PersistentFlags())
+	c.Flags().AddFlagSet(c.parentsPflags)
+}
+
+// updateParentsPflags updates c.parentsPflags by adding
+// new persistent flags of all parents.
+// If c.parentsPflags == nil, it makes new.
+func (c *Command) updateParentsPflags() {
+	if c.parentsPflags == nil {
+		c.parentsPflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+		c.parentsPflags.SetOutput(c.flagErrorBuf)
+		c.parentsPflags.SortFlags = false
+	}
+
+	if c.globNormFunc != nil {
+		c.parentsPflags.SetNormalizeFunc(c.globNormFunc)
+	}
+
+	c.Root().PersistentFlags().AddFlagSet(flag.CommandLine)
+
+	c.VisitParents(func(parent *Command) {
+		c.parentsPflags.AddFlagSet(parent.PersistentFlags())
+	})
+}
diff --git a/vendor/github.com/spf13/cobra/command_notwin.go b/vendor/github.com/spf13/cobra/command_notwin.go
new file mode 100644
index 00000000..6159c1cc
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/command_notwin.go
@@ -0,0 +1,5 @@
+// +build !windows
+
+package cobra
+
+var preExecHookFn func(*Command)
diff --git a/vendor/github.com/spf13/cobra/command_win.go b/vendor/github.com/spf13/cobra/command_win.go
new file mode 100644
index 00000000..8768b173
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/command_win.go
@@ -0,0 +1,26 @@
+// +build windows
+
+package cobra
+
+import (
+	"fmt"
+	"os"
+	"time"
+
+	"github.com/inconshreveable/mousetrap"
+)
+
+var preExecHookFn = preExecHook
+
+func preExecHook(c *Command) {
+	if MousetrapHelpText != "" && mousetrap.StartedByExplorer() {
+		c.Print(MousetrapHelpText)
+		if MousetrapDisplayDuration > 0 {
+			time.Sleep(MousetrapDisplayDuration)
+		} else {
+			c.Println("Press return to continue...")
+			fmt.Scanln()
+		}
+		os.Exit(1)
+	}
+}
diff --git a/vendor/github.com/spf13/cobra/go.mod b/vendor/github.com/spf13/cobra/go.mod
new file mode 100644
index 00000000..9a9eb65a
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/go.mod
@@ -0,0 +1,13 @@
+module github.com/spf13/cobra
+
+go 1.12
+
+require (
+	github.com/BurntSushi/toml v0.3.1 // indirect
+	github.com/cpuguy83/go-md2man v1.0.10
+	github.com/inconshreveable/mousetrap v1.0.0
+	github.com/mitchellh/go-homedir v1.1.0
+	github.com/spf13/pflag v1.0.3
+	github.com/spf13/viper v1.3.2
+	gopkg.in/yaml.v2 v2.2.2
+)
diff --git a/vendor/github.com/spf13/cobra/go.sum b/vendor/github.com/spf13/cobra/go.sum
new file mode 100644
index 00000000..9761f4d0
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/go.sum
@@ -0,0 +1,51 @@
+github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk=
+github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY=
+github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
+github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
+github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
+github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
+github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
+github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
+github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
+github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/viper v1.3.2 h1:VUFqw5KcqRf7i70GOzW7N+Q7+gxVBkSSqiXB12+JQ4M=
+github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
+github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
+github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a h1:1n5lsVfiQW3yfsRGu98756EH1YthsFqr/5mxHduZW2A=
+golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/spf13/cobra/powershell_completions.go b/vendor/github.com/spf13/cobra/powershell_completions.go
new file mode 100644
index 00000000..756c61b9
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/powershell_completions.go
@@ -0,0 +1,100 @@
+// PowerShell completions are based on the amazing work from clap:
+// https://github.com/clap-rs/clap/blob/3294d18efe5f264d12c9035f404c7d189d4824e1/src/completions/powershell.rs
+//
+// The generated scripts require PowerShell v5.0+ (which comes with Windows 10,
+// but can be downloaded separately for Windows 7 or 8.1).
+
+package cobra
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+	"strings"
+
+	"github.com/spf13/pflag"
+)
+
+var powerShellCompletionTemplate = `using namespace System.Management.Automation
+using namespace System.Management.Automation.Language
+Register-ArgumentCompleter -Native -CommandName '%s' -ScriptBlock {
+    param($wordToComplete, $commandAst, $cursorPosition)
+    $commandElements = $commandAst.CommandElements
+    $command = @(
+        '%s'
+        for ($i = 1; $i -lt $commandElements.Count; $i++) {
+            $element = $commandElements[$i]
+            if ($element -isnot [StringConstantExpressionAst] -or
+                $element.StringConstantType -ne [StringConstantType]::BareWord -or
+                $element.Value.StartsWith('-')) {
+                break
+            }
+            $element.Value
+        }
+    ) -join ';'
+    $completions = @(switch ($command) {%s
+    })
+    $completions.Where{ $_.CompletionText -like "$wordToComplete*" } |
+        Sort-Object -Property ListItemText
+}`
+
+func generatePowerShellSubcommandCases(out io.Writer, cmd *Command, previousCommandName string) {
+	var cmdName string
+	if previousCommandName == "" {
+		cmdName = cmd.Name()
+	} else {
+		cmdName = fmt.Sprintf("%s;%s", previousCommandName, cmd.Name())
+	}
+
+	fmt.Fprintf(out, "\n        '%s' {", cmdName)
+
+	cmd.Flags().VisitAll(func(flag *pflag.Flag) {
+		if nonCompletableFlag(flag) {
+			return
+		}
+		usage := escapeStringForPowerShell(flag.Usage)
+		if len(flag.Shorthand) > 0 {
+			fmt.Fprintf(out, "\n            [CompletionResult]::new('-%s', '%s', [CompletionResultType]::ParameterName, '%s')", flag.Shorthand, flag.Shorthand, usage)
+		}
+		fmt.Fprintf(out, "\n            [CompletionResult]::new('--%s', '%s', [CompletionResultType]::ParameterName, '%s')", flag.Name, flag.Name, usage)
+	})
+
+	for _, subCmd := range cmd.Commands() {
+		usage := escapeStringForPowerShell(subCmd.Short)
+		fmt.Fprintf(out, "\n            [CompletionResult]::new('%s', '%s', [CompletionResultType]::ParameterValue, '%s')", subCmd.Name(), subCmd.Name(), usage)
+	}
+
+	fmt.Fprint(out, "\n            break\n        }")
+
+	for _, subCmd := range cmd.Commands() {
+		generatePowerShellSubcommandCases(out, subCmd, cmdName)
+	}
+}
+
+func escapeStringForPowerShell(s string) string {
+	return strings.Replace(s, "'", "''", -1)
+}
+
+// GenPowerShellCompletion generates a PowerShell completion script and writes it to the passed writer.
+func (c *Command) GenPowerShellCompletion(w io.Writer) error {
+	buf := new(bytes.Buffer)
+
+	var subCommandCases bytes.Buffer
+	generatePowerShellSubcommandCases(&subCommandCases, c, "")
+	fmt.Fprintf(buf, powerShellCompletionTemplate, c.Name(), c.Name(), subCommandCases.String())
+
+	_, err := buf.WriteTo(w)
+	return err
+}
+
+// GenPowerShellCompletionFile generates a PowerShell completion file.
+func (c *Command) GenPowerShellCompletionFile(filename string) error {
+	outFile, err := os.Create(filename)
+	if err != nil {
+		return err
+	}
+	defer outFile.Close()
+
+	return c.GenPowerShellCompletion(outFile)
+}
diff --git a/vendor/github.com/spf13/cobra/powershell_completions.md b/vendor/github.com/spf13/cobra/powershell_completions.md
new file mode 100644
index 00000000..afed8024
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/powershell_completions.md
@@ -0,0 +1,14 @@
+# Generating PowerShell Completions For Your Own cobra.Command
+
+Cobra can generate PowerShell completion scripts. Users need PowerShell version 5.0 or above, which comes with Windows 10 and can be downloaded separately for Windows 7 or 8.1. They can then write the completions to a file and source this file from their PowerShell profile, which is referenced by the `$PROFILE` variable. See `Get-Help about_Profiles` for more info about PowerShell profiles.
+
+## What's supported
+
+- Completion for subcommands using their `.Short` description
+- Completion for non-hidden flags using their `.Name` and `.Shorthand`
+
+## What's not yet supported
+
+- Command aliases
+- Required, filename or custom flags (they will work like normal flags)
+- Custom completion scripts
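+
+As a minimal sketch (the program name `myapp`, the variable `rootCmd`, and the
+output file name `myapp.ps1` are placeholders for illustration, not part of
+Cobra), a program can write the completion script with
+`GenPowerShellCompletionFile`:
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/spf13/cobra"
+)
+
+func main() {
+	// rootCmd is a placeholder root command; a real CLI would attach its
+	// subcommands and flags before generating completions.
+	rootCmd := &cobra.Command{Use: "myapp", Short: "An example CLI"}
+
+	// Write the PowerShell completion script; users can then dot-source
+	// myapp.ps1 from their PowerShell profile.
+	if err := rootCmd.GenPowerShellCompletionFile("myapp.ps1"); err != nil {
+		log.Fatal(err)
+	}
+}
+```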
diff --git a/vendor/github.com/spf13/cobra/shell_completions.go b/vendor/github.com/spf13/cobra/shell_completions.go
new file mode 100644
index 00000000..ba0af9cb
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/shell_completions.go
@@ -0,0 +1,85 @@
+package cobra
+
+import (
+	"github.com/spf13/pflag"
+)
+
+// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag if it exists,
+// and causes your command to report an error if invoked without the flag.
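+//
+// For example, given a hypothetical flag registered elsewhere with
+// cmd.Flags().String("config", "", "path to config file"), marking it
+// required looks like:
+//
+//	_ = cmd.MarkFlagRequired("config")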
+func (c *Command) MarkFlagRequired(name string) error {
+	return MarkFlagRequired(c.Flags(), name)
+}
+
+// MarkPersistentFlagRequired adds the BashCompOneRequiredFlag annotation to the named persistent flag if it exists,
+// and causes your command to report an error if invoked without the flag.
+func (c *Command) MarkPersistentFlagRequired(name string) error {
+	return MarkFlagRequired(c.PersistentFlags(), name)
+}
+
+// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag if it exists,
+// and causes your command to report an error if invoked without the flag.
+func MarkFlagRequired(flags *pflag.FlagSet, name string) error {
+	return flags.SetAnnotation(name, BashCompOneRequiredFlag, []string{"true"})
+}
+
+// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag, if it exists.
+// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided.
+func (c *Command) MarkFlagFilename(name string, extensions ...string) error {
+	return MarkFlagFilename(c.Flags(), name, extensions...)
+}
+
+// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists.
+// Generated bash autocompletion will call the bash function f for the flag.
+func (c *Command) MarkFlagCustom(name string, f string) error {
+	return MarkFlagCustom(c.Flags(), name, f)
+}
+
+// MarkPersistentFlagFilename instructs the various shell completion
+// implementations to limit completions for this persistent flag to the
+// specified extensions (patterns).
+//
+// Shell Completion compatibility matrix: bash, zsh
+func (c *Command) MarkPersistentFlagFilename(name string, extensions ...string) error {
+	return MarkFlagFilename(c.PersistentFlags(), name, extensions...)
+}
+
+// MarkFlagFilename instructs the various shell completion implementations to
+// limit completions for this flag to the specified extensions (patterns).
+//
+// Shell Completion compatibility matrix: bash, zsh
+func MarkFlagFilename(flags *pflag.FlagSet, name string, extensions ...string) error {
+	return flags.SetAnnotation(name, BashCompFilenameExt, extensions)
+}
+
+// MarkFlagCustom adds the BashCompCustom annotation to the named flag in the
+// given FlagSet, if it exists. Generated bash autocompletion will call the
+// bash function f for the flag.
+//
+// Shell Completion compatibility matrix: bash
+func MarkFlagCustom(flags *pflag.FlagSet, name string, f string) error {
+	return flags.SetAnnotation(name, BashCompCustom, []string{f})
+}
+
+// MarkFlagDirname instructs the various shell completion implementations to
+// complete only directories with this named flag.
+//
+// Shell Completion compatibility matrix: zsh
+func (c *Command) MarkFlagDirname(name string) error {
+	return MarkFlagDirname(c.Flags(), name)
+}
+
+// MarkPersistentFlagDirname instructs the various shell completion
+// implementations to complete only directories with this persistent named flag.
+//
+// Shell Completion compatibility matrix: zsh
+func (c *Command) MarkPersistentFlagDirname(name string) error {
+	return MarkFlagDirname(c.PersistentFlags(), name)
+}
+
+// MarkFlagDirname instructs the various shell completion implementations to
+// complete only directories with this specified flag.
+//
+// Shell Completion compatibility matrix: zsh
+func MarkFlagDirname(flags *pflag.FlagSet, name string) error {
+	zshPattern := "-(/)"
+	return flags.SetAnnotation(name, zshCompDirname, []string{zshPattern})
+}
diff --git a/vendor/github.com/spf13/cobra/zsh_completions.go b/vendor/github.com/spf13/cobra/zsh_completions.go
new file mode 100644
index 00000000..12755482
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/zsh_completions.go
@@ -0,0 +1,336 @@
+package cobra
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+	"sort"
+	"strings"
+	"text/template"
+
+	"github.com/spf13/pflag"
+)
+
+const (
+	zshCompArgumentAnnotation   = "cobra_annotations_zsh_completion_argument_annotation"
+	zshCompArgumentFilenameComp = "cobra_annotations_zsh_completion_argument_file_completion"
+	zshCompArgumentWordComp     = "cobra_annotations_zsh_completion_argument_word_completion"
+	zshCompDirname              = "cobra_annotations_zsh_dirname"
+)
+
+var (
+	zshCompFuncMap = template.FuncMap{
+		"genZshFuncName":              zshCompGenFuncName,
+		"extractFlags":                zshCompExtractFlag,
+		"genFlagEntryForZshArguments": zshCompGenFlagEntryForArguments,
+		"extractArgsCompletions":      zshCompExtractArgumentCompletionHintsForRendering,
+	}
+	zshCompletionText = `
+{{/* should accept Command (that contains subcommands) as parameter */}}
+{{define "argumentsC" -}}
+{{ $cmdPath := genZshFuncName .}}
+function {{$cmdPath}} {
+  local -a commands
+
+  _arguments -C \{{- range extractFlags .}}
+    {{genFlagEntryForZshArguments .}} \{{- end}}
+    "1: :->cmnds" \
+    "*::arg:->args"
+
+  case $state in
+  cmnds)
+    commands=({{range .Commands}}{{if not .Hidden}}
+      "{{.Name}}:{{.Short}}"{{end}}{{end}}
+    )
+    _describe "command" commands
+    ;;
+  esac
+
+  case "$words[1]" in {{- range .Commands}}{{if not .Hidden}}
+  {{.Name}})
+    {{$cmdPath}}_{{.Name}}
+    ;;{{end}}{{end}}
+  esac
+}
+{{range .Commands}}{{if not .Hidden}}
+{{template "selectCmdTemplate" .}}
+{{- end}}{{end}}
+{{- end}}
+
+{{/* should accept Command without subcommands as parameter */}}
+{{define "arguments" -}}
+function {{genZshFuncName .}} {
+{{"  _arguments"}}{{range extractFlags .}} \
+    {{genFlagEntryForZshArguments . -}}
+{{end}}{{range extractArgsCompletions .}} \
+    {{.}}{{end}}
+}
+{{end}}
+
+{{/* dispatcher for commands with or without subcommands */}}
+{{define "selectCmdTemplate" -}}
+{{if .Hidden}}{{/* ignore hidden*/}}{{else -}}
+{{if .Commands}}{{template "argumentsC" .}}{{else}}{{template "arguments" .}}{{end}}
+{{- end}}
+{{- end}}
+
+{{/* template entry point */}}
+{{define "Main" -}}
+#compdef _{{.Name}} {{.Name}}
+
+{{template "selectCmdTemplate" .}}
+{{end}}
+`
+)
+
+// zshCompArgsAnnotation is used to encode/decode zsh completion for
+// arguments to/from Command.Annotations.
+type zshCompArgsAnnotation map[int]zshCompArgHint
+
+type zshCompArgHint struct {
+	// Indicates the type of the completion to use. One of:
+	// zshCompArgumentFilenameComp or zshCompArgumentWordComp
+	Tipe string `json:"type"`
+
+	// A value for the type above (globs for file completion or words)
+	Options []string `json:"options"`
+}
+
+// GenZshCompletionFile generates zsh completion file.
+func (c *Command) GenZshCompletionFile(filename string) error {
+	outFile, err := os.Create(filename)
+	if err != nil {
+		return err
+	}
+	defer outFile.Close()
+
+	return c.GenZshCompletion(outFile)
+}
+
+// GenZshCompletion generates a zsh completion script and writes it to the passed
+// writer. The completion always runs on the root command regardless of the
+// command it was called from.
+func (c *Command) GenZshCompletion(w io.Writer) error {
+	tmpl, err := template.New("Main").Funcs(zshCompFuncMap).Parse(zshCompletionText)
+	if err != nil {
+		return fmt.Errorf("error creating zsh completion template: %v", err)
+	}
+	return tmpl.Execute(w, c.Root())
+}
+
+// MarkZshCompPositionalArgumentFile marks the specified argument (first
+// argument is 1) as completed by file selection. Patterns (e.g. "*.txt") are
+// optional; if not provided, the completion will search for all files.
+func (c *Command) MarkZshCompPositionalArgumentFile(argPosition int, patterns ...string) error {
+	if argPosition < 1 {
+		return fmt.Errorf("Invalid argument position (%d)", argPosition)
+	}
+	annotation, err := c.zshCompGetArgsAnnotations()
+	if err != nil {
+		return err
+	}
+	if c.zshcompArgsAnnotationnIsDuplicatePosition(annotation, argPosition) {
+		return fmt.Errorf("Duplicate annotation for positional argument at index %d", argPosition)
+	}
+	annotation[argPosition] = zshCompArgHint{
+		Tipe:    zshCompArgumentFilenameComp,
+		Options: patterns,
+	}
+	return c.zshCompSetArgsAnnotations(annotation)
+}
+
+// MarkZshCompPositionalArgumentWords marks the specified positional argument
+// (first argument is 1) as completed by the provided words. At least one word
+// must be provided; words containing spaces will be offered for completion as
+// "word\ word".
+func (c *Command) MarkZshCompPositionalArgumentWords(argPosition int, words ...string) error {
+	if argPosition < 1 {
+		return fmt.Errorf("Invalid argument position (%d)", argPosition)
+	}
+	if len(words) == 0 {
+		return fmt.Errorf("Trying to set empty word list for positional argument %d", argPosition)
+	}
+	annotation, err := c.zshCompGetArgsAnnotations()
+	if err != nil {
+		return err
+	}
+	if c.zshcompArgsAnnotationnIsDuplicatePosition(annotation, argPosition) {
+		return fmt.Errorf("Duplicate annotation for positional argument at index %d", argPosition)
+	}
+	annotation[argPosition] = zshCompArgHint{
+		Tipe:    zshCompArgumentWordComp,
+		Options: words,
+	}
+	return c.zshCompSetArgsAnnotations(annotation)
+}
+
+func zshCompExtractArgumentCompletionHintsForRendering(c *Command) ([]string, error) {
+	var result []string
+	annotation, err := c.zshCompGetArgsAnnotations()
+	if err != nil {
+		return nil, err
+	}
+	for k, v := range annotation {
+		s, err := zshCompRenderZshCompArgHint(k, v)
+		if err != nil {
+			return nil, err
+		}
+		result = append(result, s)
+	}
+	if len(c.ValidArgs) > 0 {
+		if _, positionOneExists := annotation[1]; !positionOneExists {
+			s, err := zshCompRenderZshCompArgHint(1, zshCompArgHint{
+				Tipe:    zshCompArgumentWordComp,
+				Options: c.ValidArgs,
+			})
+			if err != nil {
+				return nil, err
+			}
+			result = append(result, s)
+		}
+	}
+	sort.Strings(result)
+	return result, nil
+}
+
+func zshCompRenderZshCompArgHint(i int, z zshCompArgHint) (string, error) {
+	switch t := z.Tipe; t {
+	case zshCompArgumentFilenameComp:
+		var globs []string
+		for _, g := range z.Options {
+			globs = append(globs, fmt.Sprintf(`-g "%s"`, g))
+		}
+		return fmt.Sprintf(`'%d: :_files %s'`, i, strings.Join(globs, " ")), nil
+	case zshCompArgumentWordComp:
+		var words []string
+		for _, w := range z.Options {
+			words = append(words, fmt.Sprintf("%q", w))
+		}
+		return fmt.Sprintf(`'%d: :(%s)'`, i, strings.Join(words, " ")), nil
+	default:
+		return "", fmt.Errorf("Invalid zsh argument completion annotation: %s", t)
+	}
+}
+
+func (c *Command) zshcompArgsAnnotationnIsDuplicatePosition(annotation zshCompArgsAnnotation, position int) bool {
+	_, dup := annotation[position]
+	return dup
+}
+
+func (c *Command) zshCompGetArgsAnnotations() (zshCompArgsAnnotation, error) {
+	annotation := make(zshCompArgsAnnotation)
+	annotationString, ok := c.Annotations[zshCompArgumentAnnotation]
+	if !ok {
+		return annotation, nil
+	}
+	err := json.Unmarshal([]byte(annotationString), &annotation)
+	if err != nil {
+		return annotation, fmt.Errorf("Error unmarshaling zsh argument annotation: %v", err)
+	}
+	return annotation, nil
+}
+
+func (c *Command) zshCompSetArgsAnnotations(annotation zshCompArgsAnnotation) error {
+	jsn, err := json.Marshal(annotation)
+	if err != nil {
+		return fmt.Errorf("Error marshaling zsh argument annotation: %v", err)
+	}
+	if c.Annotations == nil {
+		c.Annotations = make(map[string]string)
+	}
+	c.Annotations[zshCompArgumentAnnotation] = string(jsn)
+	return nil
+}
+
+func zshCompGenFuncName(c *Command) string {
+	if c.HasParent() {
+		return zshCompGenFuncName(c.Parent()) + "_" + c.Name()
+	}
+	return "_" + c.Name()
+}
+
+func zshCompExtractFlag(c *Command) []*pflag.Flag {
+	var flags []*pflag.Flag
+	c.LocalFlags().VisitAll(func(f *pflag.Flag) {
+		if !f.Hidden {
+			flags = append(flags, f)
+		}
+	})
+	c.InheritedFlags().VisitAll(func(f *pflag.Flag) {
+		if !f.Hidden {
+			flags = append(flags, f)
+		}
+	})
+	return flags
+}
+
+// zshCompGenFlagEntryForArguments returns an entry that matches _arguments
+// zsh-completion parameters. It's too complicated to generate in a template.
+func zshCompGenFlagEntryForArguments(f *pflag.Flag) string {
+	if f.Name == "" || f.Shorthand == "" {
+		return zshCompGenFlagEntryForSingleOptionFlag(f)
+	}
+	return zshCompGenFlagEntryForMultiOptionFlag(f)
+}
+
+func zshCompGenFlagEntryForSingleOptionFlag(f *pflag.Flag) string {
+	var option, multiMark, extras string
+
+	if zshCompFlagCouldBeSpecifiedMoreThenOnce(f) {
+		multiMark = "*"
+	}
+
+	option = "--" + f.Name
+	if option == "--" {
+		option = "-" + f.Shorthand
+	}
+	extras = zshCompGenFlagEntryExtras(f)
+
+	return fmt.Sprintf(`'%s%s[%s]%s'`, multiMark, option, zshCompQuoteFlagDescription(f.Usage), extras)
+}
+
+func zshCompGenFlagEntryForMultiOptionFlag(f *pflag.Flag) string {
+	var options, parenMultiMark, curlyMultiMark, extras string
+
+	if zshCompFlagCouldBeSpecifiedMoreThenOnce(f) {
+		parenMultiMark = "*"
+		curlyMultiMark = "\\*"
+	}
+
+	options = fmt.Sprintf(`'(%s-%s %s--%s)'{%s-%s,%s--%s}`,
+		parenMultiMark, f.Shorthand, parenMultiMark, f.Name, curlyMultiMark, f.Shorthand, curlyMultiMark, f.Name)
+	extras = zshCompGenFlagEntryExtras(f)
+
+	return fmt.Sprintf(`%s'[%s]%s'`, options, zshCompQuoteFlagDescription(f.Usage), extras)
+}
+
+func zshCompGenFlagEntryExtras(f *pflag.Flag) string {
+	if f.NoOptDefVal != "" {
+		return ""
+	}
+
+	extras := ":" // allow options for flag (even without assistance)
+	for key, values := range f.Annotations {
+		switch key {
+		case zshCompDirname:
+			extras = fmt.Sprintf(":filename:_files -g %q", values[0])
+		case BashCompFilenameExt:
+			extras = ":filename:_files"
+			for _, pattern := range values {
+				extras = extras + fmt.Sprintf(` -g "%s"`, pattern)
+			}
+		}
+	}
+
+	return extras
+}
+
+func zshCompFlagCouldBeSpecifiedMoreThenOnce(f *pflag.Flag) bool {
+	return strings.Contains(f.Value.Type(), "Slice") ||
+		strings.Contains(f.Value.Type(), "Array")
+}
+
+func zshCompQuoteFlagDescription(s string) string {
+	return strings.Replace(s, "'", `'\''`, -1)
+}
diff --git a/vendor/github.com/spf13/cobra/zsh_completions.md b/vendor/github.com/spf13/cobra/zsh_completions.md
new file mode 100644
index 00000000..df9c2eac
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/zsh_completions.md
@@ -0,0 +1,39 @@
+## Generating Zsh Completion for your cobra.Command
+
+Cobra supports native Zsh completion generated from the root `cobra.Command`.
+The generated completion script should be put somewhere in your `$fpath` named
+`_<YOUR COMMAND>`.
+
+### What's Supported
+
+* Completion for all non-hidden subcommands using their `.Short` description.
+* Completion for all non-hidden flags using the following rules:
+  * Filename completion works by marking the flag with `cmd.MarkFlagFilename...`
+    family of commands.
+  * The requirement for an argument to the flag is decided by the `.NoOptDefVal`
+    flag value - if it's empty then completion will expect an argument.
+  * Flags of one of the various `*Array` and `*Slice` types support multiple
+    specifications (with or without an argument depending on the specific type).
+* Completion of positional arguments using the following rules:
+  * Argument position for all options below starts at `1`. If argument position
+    `0` is requested it will raise an error.
+  * Use `command.MarkZshCompPositionalArgumentFile` to complete filenames. Glob
+    patterns (e.g. `"*.log"`) are optional - if not specified it will offer to
+    complete all file types.
+  * Use `command.MarkZshCompPositionalArgumentWords` to offer specific words for
+    completion. At least one word is required.
+  * It's possible to specify completion for some arguments and leave others
+    unspecified (e.g. offer words for the second argument but nothing for the
+    first). This results in no completion for the first argument and word
+    completion for the second.
+  * If no argument completion was specified for the 1st argument (but optionally
+    was specified for the 2nd) and the command has `ValidArgs`, it will be used
+    as the completion options for the 1st argument.
+  * Argument completions are only offered for commands with no subcommands.
+
+### What's not yet Supported
+
+* Custom completion scripts are not supported yet (we should probably create a
+  zsh-specific one; it doesn't make sense to re-use the bash one, as the
+  functions will be different).
+* Whatever other feature you're looking for that doesn't exist yet :)
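+
+### Example
+
+As a rough sketch (the command, argument, and file names below are invented for
+illustration and are not part of Cobra), wiring this up in Go could look like:
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/spf13/cobra"
+)
+
+func main() {
+	rootCmd := &cobra.Command{Use: "myapp", Short: "An example CLI"}
+	copyCmd := &cobra.Command{
+		Use:   "copy <src> <dst>",
+		Short: "Copy a file",
+		Run:   func(cmd *cobra.Command, args []string) {},
+	}
+	rootCmd.AddCommand(copyCmd)
+
+	// Complete the first positional argument with *.log files and the
+	// second with a fixed word list.
+	if err := copyCmd.MarkZshCompPositionalArgumentFile(1, "*.log"); err != nil {
+		log.Fatal(err)
+	}
+	if err := copyCmd.MarkZshCompPositionalArgumentWords(2, "local", "remote"); err != nil {
+		log.Fatal(err)
+	}
+
+	// Write the completion script; install it somewhere in $fpath as _myapp.
+	if err := rootCmd.GenZshCompletionFile("_myapp"); err != nil {
+		log.Fatal(err)
+	}
+}
+```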
diff --git a/vendor/golang.org/x/crypto/blowfish/block.go b/vendor/golang.org/x/crypto/blowfish/block.go
new file mode 100644
index 00000000..9d80f195
--- /dev/null
+++ b/vendor/golang.org/x/crypto/blowfish/block.go
@@ -0,0 +1,159 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package blowfish
+
+// getNextWord returns the next big-endian uint32 value from the byte slice
+// at the given position in a circular manner, updating the position.
+func getNextWord(b []byte, pos *int) uint32 {
+	var w uint32
+	j := *pos
+	for i := 0; i < 4; i++ {
+		w = w<<8 | uint32(b[j])
+		j++
+		if j >= len(b) {
+			j = 0
+		}
+	}
+	*pos = j
+	return w
+}
+
+// ExpandKey performs a key expansion on the given *Cipher. Specifically, it
+// performs the Blowfish algorithm's key schedule which sets up the *Cipher's
+// pi and substitution tables for calls to Encrypt. This is used, primarily,
+// by the bcrypt package to reuse the Blowfish key schedule during its
+// set up. It's unlikely that you need to use this directly.
+func ExpandKey(key []byte, c *Cipher) {
+	j := 0
+	for i := 0; i < 18; i++ {
+		// Using inlined getNextWord for performance.
+		var d uint32
+		for k := 0; k < 4; k++ {
+			d = d<<8 | uint32(key[j])
+			j++
+			if j >= len(key) {
+				j = 0
+			}
+		}
+		c.p[i] ^= d
+	}
+
+	var l, r uint32
+	for i := 0; i < 18; i += 2 {
+		l, r = encryptBlock(l, r, c)
+		c.p[i], c.p[i+1] = l, r
+	}
+
+	for i := 0; i < 256; i += 2 {
+		l, r = encryptBlock(l, r, c)
+		c.s0[i], c.s0[i+1] = l, r
+	}
+	for i := 0; i < 256; i += 2 {
+		l, r = encryptBlock(l, r, c)
+		c.s1[i], c.s1[i+1] = l, r
+	}
+	for i := 0; i < 256; i += 2 {
+		l, r = encryptBlock(l, r, c)
+		c.s2[i], c.s2[i+1] = l, r
+	}
+	for i := 0; i < 256; i += 2 {
+		l, r = encryptBlock(l, r, c)
+		c.s3[i], c.s3[i+1] = l, r
+	}
+}
+
+// This is similar to ExpandKey, but folds the salt during the key
+// schedule. While ExpandKey is essentially expandKeyWithSalt with an all-zero
+// salt passed in, reusing ExpandKey turns out to be a place of inefficiency
+// and specializing it here is useful.
+func expandKeyWithSalt(key []byte, salt []byte, c *Cipher) {
+	j := 0
+	for i := 0; i < 18; i++ {
+		c.p[i] ^= getNextWord(key, &j)
+	}
+
+	j = 0
+	var l, r uint32
+	for i := 0; i < 18; i += 2 {
+		l ^= getNextWord(salt, &j)
+		r ^= getNextWord(salt, &j)
+		l, r = encryptBlock(l, r, c)
+		c.p[i], c.p[i+1] = l, r
+	}
+
+	for i := 0; i < 256; i += 2 {
+		l ^= getNextWord(salt, &j)
+		r ^= getNextWord(salt, &j)
+		l, r = encryptBlock(l, r, c)
+		c.s0[i], c.s0[i+1] = l, r
+	}
+
+	for i := 0; i < 256; i += 2 {
+		l ^= getNextWord(salt, &j)
+		r ^= getNextWord(salt, &j)
+		l, r = encryptBlock(l, r, c)
+		c.s1[i], c.s1[i+1] = l, r
+	}
+
+	for i := 0; i < 256; i += 2 {
+		l ^= getNextWord(salt, &j)
+		r ^= getNextWord(salt, &j)
+		l, r = encryptBlock(l, r, c)
+		c.s2[i], c.s2[i+1] = l, r
+	}
+
+	for i := 0; i < 256; i += 2 {
+		l ^= getNextWord(salt, &j)
+		r ^= getNextWord(salt, &j)
+		l, r = encryptBlock(l, r, c)
+		c.s3[i], c.s3[i+1] = l, r
+	}
+}
+
+func encryptBlock(l, r uint32, c *Cipher) (uint32, uint32) {
+	xl, xr := l, r
+	xl ^= c.p[0]
+	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[1]
+	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[2]
+	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[3]
+	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[4]
+	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[5]
+	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[6]
+	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[7]
+	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[8]
+	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[9]
+	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[10]
+	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[11]
+	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[12]
+	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[13]
+	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[14]
+	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[15]
+	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[16]
+	xr ^= c.p[17]
+	return xr, xl
+}
+
+func decryptBlock(l, r uint32, c *Cipher) (uint32, uint32) {
+	xl, xr := l, r
+	xl ^= c.p[17]
+	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[16]
+	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[15]
+	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[14]
+	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[13]
+	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[12]
+	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[11]
+	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[10]
+	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[9]
+	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[8]
+	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[7]
+	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[6]
+	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[5]
+	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[4]
+	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[3]
+	xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[2]
+	xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[1]
+	xr ^= c.p[0]
+	return xr, xl
+}
diff --git a/vendor/golang.org/x/crypto/blowfish/cipher.go b/vendor/golang.org/x/crypto/blowfish/cipher.go
new file mode 100644
index 00000000..213bf204
--- /dev/null
+++ b/vendor/golang.org/x/crypto/blowfish/cipher.go
@@ -0,0 +1,99 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package blowfish implements Bruce Schneier's Blowfish encryption algorithm.
+//
+// Blowfish is a legacy cipher and its short block size makes it vulnerable to
+// birthday bound attacks (see https://sweet32.info). It should only be used
+// where compatibility with legacy systems, not security, is the goal.
+//
+// Deprecated: any new system should use AES (from crypto/aes, if necessary in
+// an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from
+// golang.org/x/crypto/chacha20poly1305).
+package blowfish // import "golang.org/x/crypto/blowfish"
+
+// The code is a port of Bruce Schneier's C implementation.
+// See https://www.schneier.com/blowfish.html.
+
+import "strconv"
+
+// The Blowfish block size in bytes.
+const BlockSize = 8
+
+// A Cipher is an instance of Blowfish encryption using a particular key.
+type Cipher struct {
+	p              [18]uint32
+	s0, s1, s2, s3 [256]uint32
+}
+
+type KeySizeError int
+
+func (k KeySizeError) Error() string {
+	return "crypto/blowfish: invalid key size " + strconv.Itoa(int(k))
+}
+
+// NewCipher creates and returns a Cipher.
+// The key argument should be the Blowfish key, from 1 to 56 bytes.
+func NewCipher(key []byte) (*Cipher, error) {
+	var result Cipher
+	if k := len(key); k < 1 || k > 56 {
+		return nil, KeySizeError(k)
+	}
+	initCipher(&result)
+	ExpandKey(key, &result)
+	return &result, nil
+}
+
+// NewSaltedCipher creates and returns a Cipher that folds a salt into its key
+// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is
+// sufficient and desirable. For bcrypt compatibility, the key can be over 56
+// bytes.
+func NewSaltedCipher(key, salt []byte) (*Cipher, error) {
+	if len(salt) == 0 {
+		return NewCipher(key)
+	}
+	var result Cipher
+	if k := len(key); k < 1 {
+		return nil, KeySizeError(k)
+	}
+	initCipher(&result)
+	expandKeyWithSalt(key, salt, &result)
+	return &result, nil
+}
+
+// BlockSize returns the Blowfish block size, 8 bytes.
+// It is necessary to satisfy the Block interface in the
+// package "crypto/cipher".
+func (c *Cipher) BlockSize() int { return BlockSize }
+
+// Encrypt encrypts the 8-byte buffer src using the key k
+// and stores the result in dst.
+// Note that for amounts of data larger than a block,
+// it is not safe to just call Encrypt on successive blocks;
+// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go).
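+//
+// For example, a caller outside this package could combine it with
+// crypto/cipher (an illustrative sketch; key, iv, dst and src are
+// placeholders, not values defined here):
+//
+//	block, _ := blowfish.NewCipher(key)       // key: 1 to 56 bytes
+//	mode := cipher.NewCBCEncrypter(block, iv) // iv: BlockSize (8) bytes
+//	mode.CryptBlocks(dst, src)                // len(src): a multiple of BlockSize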
+func (c *Cipher) Encrypt(dst, src []byte) {
+	l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
+	r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
+	l, r = encryptBlock(l, r, c)
+	dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l)
+	dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r)
+}
+
+// Decrypt decrypts the 8-byte buffer src using the key k
+// and stores the result in dst.
+func (c *Cipher) Decrypt(dst, src []byte) {
+	l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
+	r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
+	l, r = decryptBlock(l, r, c)
+	dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l)
+	dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r)
+}
+
+func initCipher(c *Cipher) {
+	copy(c.p[0:], p[0:])
+	copy(c.s0[0:], s0[0:])
+	copy(c.s1[0:], s1[0:])
+	copy(c.s2[0:], s2[0:])
+	copy(c.s3[0:], s3[0:])
+}
diff --git a/vendor/golang.org/x/crypto/blowfish/const.go b/vendor/golang.org/x/crypto/blowfish/const.go
new file mode 100644
index 00000000..d0407759
--- /dev/null
+++ b/vendor/golang.org/x/crypto/blowfish/const.go
@@ -0,0 +1,199 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The startup permutation array and substitution boxes.
+// They are the hexadecimal digits of PI; see:
+// https://www.schneier.com/code/constants.txt.
+
+package blowfish
+
+var s0 = [256]uint32{
+	0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, 0x6a267e96,
+	0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16,
+	0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, 0x0d95748f, 0x728eb658,
+	0x718bcd58, 0x82154aee, 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013,
+	0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, 0x8e79dcb0, 0x603a180e,
+	0x6c9e0e8b, 0xb01e8a3e, 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60,
+	0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, 0x55ca396a, 0x2aab10b6,
+	0xb4cc5c34, 0x1141e8ce, 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a,
+	0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, 0xafd6ba33, 0x6c24cf5c,
+	0x7a325381, 0x28958677, 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193,
+	0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, 0xef845d5d, 0xe98575b1,
+	0xdc262302, 0xeb651b88, 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239,
+	0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, 0x21c66842, 0xf6e96c9a,
+	0x670c9c61, 0xabd388f0, 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3,
+	0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, 0xa1f1651d, 0x39af0176,
+	0x66ca593e, 0x82430e88, 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe,
+	0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, 0x4ed3aa62, 0x363f7706,
+	0x1bfedf72, 0x429b023d, 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b,
+	0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, 0xe3fe501a, 0xb6794c3b,
+	0x976ce0bd, 0x04c006ba, 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463,
+	0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, 0x6dfc511f, 0x9b30952c,
+	0xcc814544, 0xaf5ebd09, 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3,
+	0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, 0x5579c0bd, 0x1a60320a,
+	0xd6a100c6, 0x402c7279, 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8,
+	0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, 0x323db5fa, 0xfd238760,
+	0x53317b48, 0x3e00df82, 0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db,
+	0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573, 0x695b27b0, 0xbbca58c8,
+	0xe1ffa35d, 0xb8f011a0, 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b,
+	0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, 0xe1ddf2da, 0xa4cb7e33,
+	0x62fb1341, 0xcee4c6e8, 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4,
+	0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, 0xd08ed1d0, 0xafc725e0,
+	0x8e3c5b2f, 0x8e7594b7, 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c,
+	0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, 0x2f2f2218, 0xbe0e1777,
+	0xea752dfe, 0x8b021fa1, 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299,
+	0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, 0x165fa266, 0x80957705,
+	0x93cc7314, 0x211a1477, 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf,
+	0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, 0x00250e2d, 0x2071b35e,
+	0x226800bb, 0x57b8e0af, 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa,
+	0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, 0x83260376, 0x6295cfa9,
+	0x11c81968, 0x4e734a41, 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915,
+	0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, 0x08ba6fb5, 0x571be91f,
+	0xf296ec6b, 0x2a0dd915, 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664,
+	0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a,
+}
+
+var s1 = [256]uint32{
+	0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, 0xad6ea6b0, 0x49a7df7d,
+	0x9cee60b8, 0x8fedb266, 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1,
+	0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, 0x3f54989a, 0x5b429d65,
+	0x6b8fe4d6, 0x99f73fd6, 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1,
+	0x4cdd2086, 0x8470eb26, 0x6382e9c6, 0x021ecc5e, 0x09686b3f, 0x3ebaefc9,
+	0x3c971814, 0x6b6a70a1, 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737,
+	0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, 0xb03ada37, 0xf0500c0d,
+	0xf01c1f04, 0x0200b3ff, 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd,
+	0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, 0x3ae5e581, 0x37c2dadc,
+	0xc8b57634, 0x9af3dda7, 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41,
+	0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331, 0x4e548b38, 0x4f6db908,
+	0x6f420d03, 0xf60a04bf, 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af,
+	0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, 0x5512721f, 0x2e6b7124,
+	0x501adde6, 0x9f84cd87, 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c,
+	0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, 0xef1c1847, 0x3215d908,
+	0xdd433b37, 0x24c2ba16, 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd,
+	0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, 0x043556f1, 0xd7a3c76b,
+	0x3c11183b, 0x5924a509, 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e,
+	0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, 0x771fe71c, 0x4e3d06fa,
+	0x2965dcb9, 0x99e71d0f, 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a,
+	0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, 0xf2f74ea7, 0x361d2b3d,
+	0x1939260f, 0x19c27960, 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66,
+	0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, 0xc332ddef, 0xbe6c5aa5,
+	0x65582185, 0x68ab9802, 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84,
+	0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, 0x13cca830, 0xeb61bd96,
+	0x0334fe1e, 0xaa0363cf, 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14,
+	0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, 0x648b1eaf, 0x19bdf0ca,
+	0xa02369b9, 0x655abb50, 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7,
+	0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, 0xf837889a, 0x97e32d77,
+	0x11ed935f, 0x16681281, 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99,
+	0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696, 0xcdb30aeb, 0x532e3054,
+	0x8fd948e4, 0x6dbc3128, 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73,
+	0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, 0x45eee2b6, 0xa3aaabea,
+	0xdb6c4f15, 0xfacb4fd0, 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105,
+	0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, 0xcf62a1f2, 0x5b8d2646,
+	0xfc8883a0, 0xc1c7b6a3, 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285,
+	0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, 0x58428d2a, 0x0c55f5ea,
+	0x1dadf43e, 0x233f7061, 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb,
+	0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, 0xa6078084, 0x19f8509e,
+	0xe8efd855, 0x61d99735, 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc,
+	0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, 0xdb73dbd3, 0x105588cd,
+	0x675fda79, 0xe3674340, 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20,
+	0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7,
+}
+
+var s2 = [256]uint32{
+	0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, 0x411520f7, 0x7602d4f7,
+	0xbcf46b2e, 0xd4a20068, 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af,
+	0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, 0x4d95fc1d, 0x96b591af,
+	0x70f4ddd3, 0x66a02f45, 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504,
+	0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, 0x28507825, 0x530429f4,
+	0x0a2c86da, 0xe9b66dfb, 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee,
+	0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, 0xaace1e7c, 0xd3375fec,
+	0xce78a399, 0x406b2a42, 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b,
+	0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, 0x3a6efa74, 0xdd5b4332,
+	0x6841e7f7, 0xca7820fb, 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527,
+	0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, 0x55a867bc, 0xa1159a58,
+	0xcca92963, 0x99e1db33, 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c,
+	0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, 0x95c11548, 0xe4c66d22,
+	0x48c1133f, 0xc70f86dc, 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17,
+	0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, 0x257b7834, 0x602a9c60,
+	0xdff8e8a3, 0x1f636c1b, 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115,
+	0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, 0x85b2a20e, 0xe6ba0d99,
+	0xde720c8c, 0x2da2f728, 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0,
+	0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, 0x0a476341, 0x992eff74,
+	0x3a6f6eab, 0xf4f8fd37, 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d,
+	0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, 0xf1290dc7, 0xcc00ffa3,
+	0xb5390f92, 0x690fed0b, 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3,
+	0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, 0x37392eb3, 0xcc115979,
+	0x8026e297, 0xf42e312d, 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c,
+	0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, 0x1a6b1018, 0x11caedfa,
+	0x3d25bdd8, 0xe2e1c3c9, 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a,
+	0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, 0x9dbc8057, 0xf0f7c086,
+	0x60787bf8, 0x6003604d, 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc,
+	0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, 0x77a057be, 0xbde8ae24,
+	0x55464299, 0xbf582e61, 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2,
+	0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, 0x7aeb2661, 0x8b1ddf84,
+	0x846a0e79, 0x915f95e2, 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c,
+	0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, 0xb77f19b6, 0xe0a9dc09,
+	0x662d09a1, 0xc4324633, 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10,
+	0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, 0xdcb7da83, 0x573906fe,
+	0xa1e2ce9b, 0x4fcd7f52, 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027,
+	0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, 0xf0177a28, 0xc0f586e0,
+	0x006058aa, 0x30dc7d62, 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634,
+	0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, 0x6f05e409, 0x4b7c0188,
+	0x39720a3d, 0x7c927c24, 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc,
+	0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, 0x1e50ef5e, 0xb161e6f8,
+	0xa28514d9, 0x6c51133c, 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837,
+	0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0,
+}
+
+var s3 = [256]uint32{
+	0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, 0x5cb0679e, 0x4fa33742,
+	0xd3822740, 0x99bc9bbe, 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b,
+	0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, 0x5748ab2f, 0xbc946e79,
+	0xc6a376d2, 0x6549c2c8, 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6,
+	0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, 0xa1fad5f0, 0x6a2d519a,
+	0x63ef8ce2, 0x9a86ee22, 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4,
+	0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, 0x2826a2f9, 0xa73a3ae1,
+	0x4ba99586, 0xef5562e9, 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59,
+	0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, 0xe990fd5a, 0x9e34d797,
+	0x2cf0b7d9, 0x022b8b51, 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28,
+	0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, 0xe029ac71, 0xe019a5e6,
+	0x47b0acfd, 0xed93fa9b, 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28,
+	0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, 0x15056dd4, 0x88f46dba,
+	0x03a16125, 0x0564f0bd, 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a,
+	0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, 0x7533d928, 0xb155fdf5,
+	0x03563482, 0x8aba3cbb, 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f,
+	0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, 0xea7a90c2, 0xfb3e7bce,
+	0x5121ce64, 0x774fbe32, 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680,
+	0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, 0xb39a460a, 0x6445c0dd,
+	0x586cdecf, 0x1c20c8ae, 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb,
+	0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, 0x72eacea8, 0xfa6484bb,
+	0x8d6612ae, 0xbf3c6f47, 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370,
+	0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, 0x4040cb08, 0x4eb4e2cc,
+	0x34d2466a, 0x0115af84, 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048,
+	0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, 0x611560b1, 0xe7933fdc,
+	0xbb3a792b, 0x344525bd, 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9,
+	0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, 0x1a908749, 0xd44fbd9a,
+	0xd0dadecb, 0xd50ada38, 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f,
+	0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, 0xbf97222c, 0x15e6fc2a,
+	0x0f91fc71, 0x9b941525, 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1,
+	0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, 0xe0ec6e0e, 0x1698db3b,
+	0x4c98a0be, 0x3278e964, 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e,
+	0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, 0xdf359f8d, 0x9b992f2e,
+	0xe60b6f47, 0x0fe3f11d, 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f,
+	0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, 0xf523f357, 0xa6327623,
+	0x93a83531, 0x56cccd02, 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc,
+	0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, 0xe6c6c7bd, 0x327a140a,
+	0x45e1d006, 0xc3f27b9a, 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6,
+	0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, 0x53113ec0, 0x1640e3d3,
+	0x38abbd60, 0x2547adf0, 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060,
+	0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, 0x1948c25c, 0x02fb8a8c,
+	0x01c36ae4, 0xd6ebe1f9, 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f,
+	0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6,
+}
+
+var p = [18]uint32{
+	0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0,
+	0x082efa98, 0xec4e6c89, 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c,
+	0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, 0x9216d5d9, 0x8979fb1b,
+}
diff --git a/vendor/golang.org/x/crypto/poly1305/sum_arm.go b/vendor/golang.org/x/crypto/poly1305/sum_arm.go
deleted file mode 100644
index 6e695e42..00000000
--- a/vendor/golang.org/x/crypto/poly1305/sum_arm.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build arm,!gccgo,!appengine,!nacl
-
-package poly1305
-
-// poly1305_auth_armv6 is implemented in sum_arm.s
-//go:noescape
-func poly1305_auth_armv6(out *[16]byte, m *byte, mlen uint32, key *[32]byte)
-
-func sum(out *[16]byte, m []byte, key *[32]byte) {
-	var mPtr *byte
-	if len(m) > 0 {
-		mPtr = &m[0]
-	}
-	poly1305_auth_armv6(out, mPtr, uint32(len(m)), key)
-}
diff --git a/vendor/golang.org/x/crypto/poly1305/sum_arm.s b/vendor/golang.org/x/crypto/poly1305/sum_arm.s
deleted file mode 100644
index f70b4ac4..00000000
--- a/vendor/golang.org/x/crypto/poly1305/sum_arm.s
+++ /dev/null
@@ -1,427 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build arm,!gccgo,!appengine,!nacl
-
-#include "textflag.h"
-
-// This code was translated into a form compatible with 5a from the public
-// domain source by Andrew Moon: github.com/floodyberry/poly1305-opt/blob/master/app/extensions/poly1305.
-
-DATA ·poly1305_init_constants_armv6<>+0x00(SB)/4, $0x3ffffff
-DATA ·poly1305_init_constants_armv6<>+0x04(SB)/4, $0x3ffff03
-DATA ·poly1305_init_constants_armv6<>+0x08(SB)/4, $0x3ffc0ff
-DATA ·poly1305_init_constants_armv6<>+0x0c(SB)/4, $0x3f03fff
-DATA ·poly1305_init_constants_armv6<>+0x10(SB)/4, $0x00fffff
-GLOBL ·poly1305_init_constants_armv6<>(SB), 8, $20
-
-// Warning: the linker may use R11 to synthesize certain instructions. Please
-// take care and verify that no synthetic instructions use it.
-
-TEXT poly1305_init_ext_armv6<>(SB), NOSPLIT, $0
-	// Needs 16 bytes of stack and 64 bytes of space pointed to by R0.  (It
-	// might look like it's only 60 bytes of space but the final four bytes
-	// will be written by another function.) We need to skip over four
-	// bytes of stack because that's saving the value of 'g'.
-	ADD       $4, R13, R8
-	MOVM.IB   [R4-R7], (R8)
-	MOVM.IA.W (R1), [R2-R5]
-	MOVW      $·poly1305_init_constants_armv6<>(SB), R7
-	MOVW      R2, R8
-	MOVW      R2>>26, R9
-	MOVW      R3>>20, g
-	MOVW      R4>>14, R11
-	MOVW      R5>>8, R12
-	ORR       R3<<6, R9, R9
-	ORR       R4<<12, g, g
-	ORR       R5<<18, R11, R11
-	MOVM.IA   (R7), [R2-R6]
-	AND       R8, R2, R2
-	AND       R9, R3, R3
-	AND       g, R4, R4
-	AND       R11, R5, R5
-	AND       R12, R6, R6
-	MOVM.IA.W [R2-R6], (R0)
-	EOR       R2, R2, R2
-	EOR       R3, R3, R3
-	EOR       R4, R4, R4
-	EOR       R5, R5, R5
-	EOR       R6, R6, R6
-	MOVM.IA.W [R2-R6], (R0)
-	MOVM.IA.W (R1), [R2-R5]
-	MOVM.IA   [R2-R6], (R0)
-	ADD       $20, R13, R0
-	MOVM.DA   (R0), [R4-R7]
-	RET
-
-#define MOVW_UNALIGNED(Rsrc, Rdst, Rtmp, offset) \
-	MOVBU (offset+0)(Rsrc), Rtmp; \
-	MOVBU Rtmp, (offset+0)(Rdst); \
-	MOVBU (offset+1)(Rsrc), Rtmp; \
-	MOVBU Rtmp, (offset+1)(Rdst); \
-	MOVBU (offset+2)(Rsrc), Rtmp; \
-	MOVBU Rtmp, (offset+2)(Rdst); \
-	MOVBU (offset+3)(Rsrc), Rtmp; \
-	MOVBU Rtmp, (offset+3)(Rdst)
-
-TEXT poly1305_blocks_armv6<>(SB), NOSPLIT, $0
-	// Needs 24 bytes of stack for saved registers and then 88 bytes of
-	// scratch space after that. We assume that 24 bytes at (R13) have
-	// already been used: four bytes for the link register saved in the
-	// prelude of poly1305_auth_armv6, four bytes for saving the value of g
-	// in that function and 16 bytes of scratch space used around
-	// poly1305_finish_ext_armv6_skip1.
-	ADD     $24, R13, R12
-	MOVM.IB [R4-R8, R14], (R12)
-	MOVW    R0, 88(R13)
-	MOVW    R1, 92(R13)
-	MOVW    R2, 96(R13)
-	MOVW    R1, R14
-	MOVW    R2, R12
-	MOVW    56(R0), R8
-	WORD    $0xe1180008                // TST R8, R8 not working see issue 5921
-	EOR     R6, R6, R6
-	MOVW.EQ $(1<<24), R6
-	MOVW    R6, 84(R13)
-	ADD     $116, R13, g
-	MOVM.IA (R0), [R0-R9]
-	MOVM.IA [R0-R4], (g)
-	CMP     $16, R12
-	BLO     poly1305_blocks_armv6_done
-
-poly1305_blocks_armv6_mainloop:
-	WORD    $0xe31e0003                            // TST R14, #3 not working see issue 5921
-	BEQ     poly1305_blocks_armv6_mainloop_aligned
-	ADD     $100, R13, g
-	MOVW_UNALIGNED(R14, g, R0, 0)
-	MOVW_UNALIGNED(R14, g, R0, 4)
-	MOVW_UNALIGNED(R14, g, R0, 8)
-	MOVW_UNALIGNED(R14, g, R0, 12)
-	MOVM.IA (g), [R0-R3]
-	ADD     $16, R14
-	B       poly1305_blocks_armv6_mainloop_loaded
-
-poly1305_blocks_armv6_mainloop_aligned:
-	MOVM.IA.W (R14), [R0-R3]
-
-poly1305_blocks_armv6_mainloop_loaded:
-	MOVW    R0>>26, g
-	MOVW    R1>>20, R11
-	MOVW    R2>>14, R12
-	MOVW    R14, 92(R13)
-	MOVW    R3>>8, R4
-	ORR     R1<<6, g, g
-	ORR     R2<<12, R11, R11
-	ORR     R3<<18, R12, R12
-	BIC     $0xfc000000, R0, R0
-	BIC     $0xfc000000, g, g
-	MOVW    84(R13), R3
-	BIC     $0xfc000000, R11, R11
-	BIC     $0xfc000000, R12, R12
-	ADD     R0, R5, R5
-	ADD     g, R6, R6
-	ORR     R3, R4, R4
-	ADD     R11, R7, R7
-	ADD     $116, R13, R14
-	ADD     R12, R8, R8
-	ADD     R4, R9, R9
-	MOVM.IA (R14), [R0-R4]
-	MULLU   R4, R5, (R11, g)
-	MULLU   R3, R5, (R14, R12)
-	MULALU  R3, R6, (R11, g)
-	MULALU  R2, R6, (R14, R12)
-	MULALU  R2, R7, (R11, g)
-	MULALU  R1, R7, (R14, R12)
-	ADD     R4<<2, R4, R4
-	ADD     R3<<2, R3, R3
-	MULALU  R1, R8, (R11, g)
-	MULALU  R0, R8, (R14, R12)
-	MULALU  R0, R9, (R11, g)
-	MULALU  R4, R9, (R14, R12)
-	MOVW    g, 76(R13)
-	MOVW    R11, 80(R13)
-	MOVW    R12, 68(R13)
-	MOVW    R14, 72(R13)
-	MULLU   R2, R5, (R11, g)
-	MULLU   R1, R5, (R14, R12)
-	MULALU  R1, R6, (R11, g)
-	MULALU  R0, R6, (R14, R12)
-	MULALU  R0, R7, (R11, g)
-	MULALU  R4, R7, (R14, R12)
-	ADD     R2<<2, R2, R2
-	ADD     R1<<2, R1, R1
-	MULALU  R4, R8, (R11, g)
-	MULALU  R3, R8, (R14, R12)
-	MULALU  R3, R9, (R11, g)
-	MULALU  R2, R9, (R14, R12)
-	MOVW    g, 60(R13)
-	MOVW    R11, 64(R13)
-	MOVW    R12, 52(R13)
-	MOVW    R14, 56(R13)
-	MULLU   R0, R5, (R11, g)
-	MULALU  R4, R6, (R11, g)
-	MULALU  R3, R7, (R11, g)
-	MULALU  R2, R8, (R11, g)
-	MULALU  R1, R9, (R11, g)
-	ADD     $52, R13, R0
-	MOVM.IA (R0), [R0-R7]
-	MOVW    g>>26, R12
-	MOVW    R4>>26, R14
-	ORR     R11<<6, R12, R12
-	ORR     R5<<6, R14, R14
-	BIC     $0xfc000000, g, g
-	BIC     $0xfc000000, R4, R4
-	ADD.S   R12, R0, R0
-	ADC     $0, R1, R1
-	ADD.S   R14, R6, R6
-	ADC     $0, R7, R7
-	MOVW    R0>>26, R12
-	MOVW    R6>>26, R14
-	ORR     R1<<6, R12, R12
-	ORR     R7<<6, R14, R14
-	BIC     $0xfc000000, R0, R0
-	BIC     $0xfc000000, R6, R6
-	ADD     R14<<2, R14, R14
-	ADD.S   R12, R2, R2
-	ADC     $0, R3, R3
-	ADD     R14, g, g
-	MOVW    R2>>26, R12
-	MOVW    g>>26, R14
-	ORR     R3<<6, R12, R12
-	BIC     $0xfc000000, g, R5
-	BIC     $0xfc000000, R2, R7
-	ADD     R12, R4, R4
-	ADD     R14, R0, R0
-	MOVW    R4>>26, R12
-	BIC     $0xfc000000, R4, R8
-	ADD     R12, R6, R9
-	MOVW    96(R13), R12
-	MOVW    92(R13), R14
-	MOVW    R0, R6
-	CMP     $32, R12
-	SUB     $16, R12, R12
-	MOVW    R12, 96(R13)
-	BHS     poly1305_blocks_armv6_mainloop
-
-poly1305_blocks_armv6_done:
-	MOVW    88(R13), R12
-	MOVW    R5, 20(R12)
-	MOVW    R6, 24(R12)
-	MOVW    R7, 28(R12)
-	MOVW    R8, 32(R12)
-	MOVW    R9, 36(R12)
-	ADD     $48, R13, R0
-	MOVM.DA (R0), [R4-R8, R14]
-	RET
-
-#define MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp) \
-	MOVBU.P 1(Rsrc), Rtmp; \
-	MOVBU.P Rtmp, 1(Rdst); \
-	MOVBU.P 1(Rsrc), Rtmp; \
-	MOVBU.P Rtmp, 1(Rdst)
-
-#define MOVWP_UNALIGNED(Rsrc, Rdst, Rtmp) \
-	MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp); \
-	MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp)
-
-// func poly1305_auth_armv6(out *[16]byte, m *byte, mlen uint32, key *[32]key)
-TEXT ·poly1305_auth_armv6(SB), $196-16
-	// The value 196, just above, is the sum of 64 (the size of the context
-	// structure) and 132 (the amount of stack needed).
-	//
-	// At this point, the stack pointer (R13) has been moved down. It
-	// points to the saved link register and there's 196 bytes of free
-	// space above it.
-	//
-	// The stack for this function looks like:
-	//
-	// +---------------------
-	// |
-	// | 64 bytes of context structure
-	// |
-	// +---------------------
-	// |
-	// | 112 bytes for poly1305_blocks_armv6
-	// |
-	// +---------------------
-	// | 16 bytes of final block, constructed at
-	// | poly1305_finish_ext_armv6_skip8
-	// +---------------------
-	// | four bytes of saved 'g'
-	// +---------------------
-	// | lr, saved by prelude    <- R13 points here
-	// +---------------------
-	MOVW g, 4(R13)
-
-	MOVW out+0(FP), R4
-	MOVW m+4(FP), R5
-	MOVW mlen+8(FP), R6
-	MOVW key+12(FP), R7
-
-	ADD  $136, R13, R0 // 136 = 4 + 4 + 16 + 112
-	MOVW R7, R1
-
-	// poly1305_init_ext_armv6 will write to the stack from R13+4, but
-	// that's ok because none of the other values have been written yet.
-	BL    poly1305_init_ext_armv6<>(SB)
-	BIC.S $15, R6, R2
-	BEQ   poly1305_auth_armv6_noblocks
-	ADD   $136, R13, R0
-	MOVW  R5, R1
-	ADD   R2, R5, R5
-	SUB   R2, R6, R6
-	BL    poly1305_blocks_armv6<>(SB)
-
-poly1305_auth_armv6_noblocks:
-	ADD  $136, R13, R0
-	MOVW R5, R1
-	MOVW R6, R2
-	MOVW R4, R3
-
-	MOVW  R0, R5
-	MOVW  R1, R6
-	MOVW  R2, R7
-	MOVW  R3, R8
-	AND.S R2, R2, R2
-	BEQ   poly1305_finish_ext_armv6_noremaining
-	EOR   R0, R0
-	ADD   $8, R13, R9                           // 8 = offset to 16 byte scratch space
-	MOVW  R0, (R9)
-	MOVW  R0, 4(R9)
-	MOVW  R0, 8(R9)
-	MOVW  R0, 12(R9)
-	WORD  $0xe3110003                           // TST R1, #3 not working see issue 5921
-	BEQ   poly1305_finish_ext_armv6_aligned
-	WORD  $0xe3120008                           // TST R2, #8 not working see issue 5921
-	BEQ   poly1305_finish_ext_armv6_skip8
-	MOVWP_UNALIGNED(R1, R9, g)
-	MOVWP_UNALIGNED(R1, R9, g)
-
-poly1305_finish_ext_armv6_skip8:
-	WORD $0xe3120004                     // TST $4, R2 not working see issue 5921
-	BEQ  poly1305_finish_ext_armv6_skip4
-	MOVWP_UNALIGNED(R1, R9, g)
-
-poly1305_finish_ext_armv6_skip4:
-	WORD $0xe3120002                     // TST $2, R2 not working see issue 5921
-	BEQ  poly1305_finish_ext_armv6_skip2
-	MOVHUP_UNALIGNED(R1, R9, g)
-	B    poly1305_finish_ext_armv6_skip2
-
-poly1305_finish_ext_armv6_aligned:
-	WORD      $0xe3120008                             // TST R2, #8 not working see issue 5921
-	BEQ       poly1305_finish_ext_armv6_skip8_aligned
-	MOVM.IA.W (R1), [g-R11]
-	MOVM.IA.W [g-R11], (R9)
-
-poly1305_finish_ext_armv6_skip8_aligned:
-	WORD   $0xe3120004                             // TST $4, R2 not working see issue 5921
-	BEQ    poly1305_finish_ext_armv6_skip4_aligned
-	MOVW.P 4(R1), g
-	MOVW.P g, 4(R9)
-
-poly1305_finish_ext_armv6_skip4_aligned:
-	WORD    $0xe3120002                     // TST $2, R2 not working see issue 5921
-	BEQ     poly1305_finish_ext_armv6_skip2
-	MOVHU.P 2(R1), g
-	MOVH.P  g, 2(R9)
-
-poly1305_finish_ext_armv6_skip2:
-	WORD    $0xe3120001                     // TST $1, R2 not working see issue 5921
-	BEQ     poly1305_finish_ext_armv6_skip1
-	MOVBU.P 1(R1), g
-	MOVBU.P g, 1(R9)
-
-poly1305_finish_ext_armv6_skip1:
-	MOVW  $1, R11
-	MOVBU R11, 0(R9)
-	MOVW  R11, 56(R5)
-	MOVW  R5, R0
-	ADD   $8, R13, R1
-	MOVW  $16, R2
-	BL    poly1305_blocks_armv6<>(SB)
-
-poly1305_finish_ext_armv6_noremaining:
-	MOVW      20(R5), R0
-	MOVW      24(R5), R1
-	MOVW      28(R5), R2
-	MOVW      32(R5), R3
-	MOVW      36(R5), R4
-	MOVW      R4>>26, R12
-	BIC       $0xfc000000, R4, R4
-	ADD       R12<<2, R12, R12
-	ADD       R12, R0, R0
-	MOVW      R0>>26, R12
-	BIC       $0xfc000000, R0, R0
-	ADD       R12, R1, R1
-	MOVW      R1>>26, R12
-	BIC       $0xfc000000, R1, R1
-	ADD       R12, R2, R2
-	MOVW      R2>>26, R12
-	BIC       $0xfc000000, R2, R2
-	ADD       R12, R3, R3
-	MOVW      R3>>26, R12
-	BIC       $0xfc000000, R3, R3
-	ADD       R12, R4, R4
-	ADD       $5, R0, R6
-	MOVW      R6>>26, R12
-	BIC       $0xfc000000, R6, R6
-	ADD       R12, R1, R7
-	MOVW      R7>>26, R12
-	BIC       $0xfc000000, R7, R7
-	ADD       R12, R2, g
-	MOVW      g>>26, R12
-	BIC       $0xfc000000, g, g
-	ADD       R12, R3, R11
-	MOVW      $-(1<<26), R12
-	ADD       R11>>26, R12, R12
-	BIC       $0xfc000000, R11, R11
-	ADD       R12, R4, R9
-	MOVW      R9>>31, R12
-	SUB       $1, R12
-	AND       R12, R6, R6
-	AND       R12, R7, R7
-	AND       R12, g, g
-	AND       R12, R11, R11
-	AND       R12, R9, R9
-	MVN       R12, R12
-	AND       R12, R0, R0
-	AND       R12, R1, R1
-	AND       R12, R2, R2
-	AND       R12, R3, R3
-	AND       R12, R4, R4
-	ORR       R6, R0, R0
-	ORR       R7, R1, R1
-	ORR       g, R2, R2
-	ORR       R11, R3, R3
-	ORR       R9, R4, R4
-	ORR       R1<<26, R0, R0
-	MOVW      R1>>6, R1
-	ORR       R2<<20, R1, R1
-	MOVW      R2>>12, R2
-	ORR       R3<<14, R2, R2
-	MOVW      R3>>18, R3
-	ORR       R4<<8, R3, R3
-	MOVW      40(R5), R6
-	MOVW      44(R5), R7
-	MOVW      48(R5), g
-	MOVW      52(R5), R11
-	ADD.S     R6, R0, R0
-	ADC.S     R7, R1, R1
-	ADC.S     g, R2, R2
-	ADC.S     R11, R3, R3
-	MOVM.IA   [R0-R3], (R8)
-	MOVW      R5, R12
-	EOR       R0, R0, R0
-	EOR       R1, R1, R1
-	EOR       R2, R2, R2
-	EOR       R3, R3, R3
-	EOR       R4, R4, R4
-	EOR       R5, R5, R5
-	EOR       R6, R6, R6
-	EOR       R7, R7, R7
-	MOVM.IA.W [R0-R7], (R12)
-	MOVM.IA   [R0-R7], (R12)
-	MOVW      4(R13), g
-	RET
diff --git a/vendor/golang.org/x/crypto/poly1305/sum_noasm.go b/vendor/golang.org/x/crypto/poly1305/sum_noasm.go
index 1682eda4..32a9cef6 100644
--- a/vendor/golang.org/x/crypto/poly1305/sum_noasm.go
+++ b/vendor/golang.org/x/crypto/poly1305/sum_noasm.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// +build s390x,!go1.11 !arm,!amd64,!s390x,!ppc64le gccgo appengine nacl
+// +build s390x,!go1.11 !amd64,!s390x,!ppc64le gccgo appengine nacl
 
 package poly1305
 
diff --git a/vendor/golang.org/x/crypto/ssh/certs.go b/vendor/golang.org/x/crypto/ssh/certs.go
index 00ed9923..0f89aec1 100644
--- a/vendor/golang.org/x/crypto/ssh/certs.go
+++ b/vendor/golang.org/x/crypto/ssh/certs.go
@@ -17,12 +17,14 @@ import (
 // These constants from [PROTOCOL.certkeys] represent the algorithm names
 // for certificate types supported by this package.
 const (
-	CertAlgoRSAv01      = "ssh-rsa-cert-v01@openssh.com"
-	CertAlgoDSAv01      = "ssh-dss-cert-v01@openssh.com"
-	CertAlgoECDSA256v01 = "ecdsa-sha2-nistp256-cert-v01@openssh.com"
-	CertAlgoECDSA384v01 = "ecdsa-sha2-nistp384-cert-v01@openssh.com"
-	CertAlgoECDSA521v01 = "ecdsa-sha2-nistp521-cert-v01@openssh.com"
-	CertAlgoED25519v01  = "ssh-ed25519-cert-v01@openssh.com"
+	CertAlgoRSAv01        = "ssh-rsa-cert-v01@openssh.com"
+	CertAlgoDSAv01        = "ssh-dss-cert-v01@openssh.com"
+	CertAlgoECDSA256v01   = "ecdsa-sha2-nistp256-cert-v01@openssh.com"
+	CertAlgoECDSA384v01   = "ecdsa-sha2-nistp384-cert-v01@openssh.com"
+	CertAlgoECDSA521v01   = "ecdsa-sha2-nistp521-cert-v01@openssh.com"
+	CertAlgoSKECDSA256v01 = "sk-ecdsa-sha2-nistp256-cert-v01@openssh.com"
+	CertAlgoED25519v01    = "ssh-ed25519-cert-v01@openssh.com"
+	CertAlgoSKED25519v01  = "sk-ssh-ed25519-cert-v01@openssh.com"
 )
 
 // Certificate types distinguish between host and user
@@ -37,6 +39,7 @@ const (
 type Signature struct {
 	Format string
 	Blob   []byte
+	Rest   []byte `ssh:"rest"`
 }
 
 // CertTimeInfinity can be used for OpenSSHCertV01.ValidBefore to indicate that
@@ -429,12 +432,14 @@ func (c *Certificate) SignCert(rand io.Reader, authority Signer) error {
 }
 
 var certAlgoNames = map[string]string{
-	KeyAlgoRSA:      CertAlgoRSAv01,
-	KeyAlgoDSA:      CertAlgoDSAv01,
-	KeyAlgoECDSA256: CertAlgoECDSA256v01,
-	KeyAlgoECDSA384: CertAlgoECDSA384v01,
-	KeyAlgoECDSA521: CertAlgoECDSA521v01,
-	KeyAlgoED25519:  CertAlgoED25519v01,
+	KeyAlgoRSA:        CertAlgoRSAv01,
+	KeyAlgoDSA:        CertAlgoDSAv01,
+	KeyAlgoECDSA256:   CertAlgoECDSA256v01,
+	KeyAlgoECDSA384:   CertAlgoECDSA384v01,
+	KeyAlgoECDSA521:   CertAlgoECDSA521v01,
+	KeyAlgoSKECDSA256: CertAlgoSKECDSA256v01,
+	KeyAlgoED25519:    CertAlgoED25519v01,
+	KeyAlgoSKED25519:  CertAlgoSKED25519v01,
 }
 
 // certToPrivAlgo returns the underlying algorithm for a certificate algorithm.
@@ -518,6 +523,12 @@ func parseSignatureBody(in []byte) (out *Signature, rest []byte, ok bool) {
 		return
 	}
 
+	switch out.Format {
+	case KeyAlgoSKECDSA256, CertAlgoSKECDSA256v01, KeyAlgoSKED25519, CertAlgoSKED25519v01:
+		out.Rest = in
+		return out, nil, ok
+	}
+
 	return out, in, ok
 }
 
diff --git a/vendor/golang.org/x/crypto/ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go b/vendor/golang.org/x/crypto/ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go
new file mode 100644
index 00000000..af81d266
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go
@@ -0,0 +1,93 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package bcrypt_pbkdf implements bcrypt_pbkdf(3) from OpenBSD.
+//
+// See https://flak.tedunangst.com/post/bcrypt-pbkdf and
+// https://cvsweb.openbsd.org/cgi-bin/cvsweb/src/lib/libutil/bcrypt_pbkdf.c.
+package bcrypt_pbkdf
+
+import (
+	"crypto/sha512"
+	"errors"
+	"golang.org/x/crypto/blowfish"
+)
+
+const blockSize = 32
+
+// Key derives a key from the password, salt and rounds count, returning a
+// []byte of length keyLen that can be used as a cryptographic key.
+func Key(password, salt []byte, rounds, keyLen int) ([]byte, error) {
+	if rounds < 1 {
+		return nil, errors.New("bcrypt_pbkdf: number of rounds is too small")
+	}
+	if len(password) == 0 {
+		return nil, errors.New("bcrypt_pbkdf: empty password")
+	}
+	if len(salt) == 0 || len(salt) > 1<<20 {
+		return nil, errors.New("bcrypt_pbkdf: bad salt length")
+	}
+	if keyLen > 1024 {
+		return nil, errors.New("bcrypt_pbkdf: keyLen is too large")
+	}
+
+	numBlocks := (keyLen + blockSize - 1) / blockSize
+	key := make([]byte, numBlocks*blockSize)
+
+	h := sha512.New()
+	h.Write(password)
+	shapass := h.Sum(nil)
+
+	shasalt := make([]byte, 0, sha512.Size)
+	cnt, tmp := make([]byte, 4), make([]byte, blockSize)
+	for block := 1; block <= numBlocks; block++ {
+		h.Reset()
+		h.Write(salt)
+		cnt[0] = byte(block >> 24)
+		cnt[1] = byte(block >> 16)
+		cnt[2] = byte(block >> 8)
+		cnt[3] = byte(block)
+		h.Write(cnt)
+		bcryptHash(tmp, shapass, h.Sum(shasalt))
+
+		out := make([]byte, blockSize)
+		copy(out, tmp)
+		for i := 2; i <= rounds; i++ {
+			h.Reset()
+			h.Write(tmp)
+			bcryptHash(tmp, shapass, h.Sum(shasalt))
+			for j := 0; j < len(out); j++ {
+				out[j] ^= tmp[j]
+			}
+		}
+
+		for i, v := range out {
+			key[i*numBlocks+(block-1)] = v
+		}
+	}
+	return key[:keyLen], nil
+}
+
+var magic = []byte("OxychromaticBlowfishSwatDynamite")
+
+func bcryptHash(out, shapass, shasalt []byte) {
+	c, err := blowfish.NewSaltedCipher(shapass, shasalt)
+	if err != nil {
+		panic(err)
+	}
+	for i := 0; i < 64; i++ {
+		blowfish.ExpandKey(shasalt, c)
+		blowfish.ExpandKey(shapass, c)
+	}
+	copy(out, magic)
+	for i := 0; i < 32; i += 8 {
+		for j := 0; j < 64; j++ {
+			c.Encrypt(out[i:i+8], out[i:i+8])
+		}
+	}
+	// Swap bytes due to different endianness.
+	for i := 0; i < 32; i += 4 {
+		out[i+3], out[i+2], out[i+1], out[i] = out[i], out[i+1], out[i+2], out[i+3]
+	}
+}
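
The Key function above mirrors OpenBSD's bcrypt_pbkdf(3); the ssh package uses it to derive an AES-256-CTR key and IV for passphrase-protected OpenSSH keys. A hedged sketch of that derivation (passphrase, salt, rounds and privKeyBlock are placeholders; note the package is internal to x/crypto/ssh, so outside callers cannot import it directly):

// Derive 48 bytes: a 32-byte AES-256 key followed by a 16-byte CTR IV.
k, err := bcrypt_pbkdf.Key(passphrase, salt, rounds, 32+16)
if err != nil {
	return nil, err
}
key, iv := k[:32], k[32:]

block, err := aes.NewCipher(key)
if err != nil {
	return nil, err
}
// XOR the encrypted private-key blob in place with the CTR keystream.
cipher.NewCTR(block, iv).XORKeyStream(privKeyBlock, privKeyBlock)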
diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go
index 96980479..06f537c1 100644
--- a/vendor/golang.org/x/crypto/ssh/keys.go
+++ b/vendor/golang.org/x/crypto/ssh/keys.go
@@ -7,6 +7,8 @@ package ssh
 import (
 	"bytes"
 	"crypto"
+	"crypto/aes"
+	"crypto/cipher"
 	"crypto/dsa"
 	"crypto/ecdsa"
 	"crypto/elliptic"
@@ -25,17 +27,20 @@ import (
 	"strings"
 
 	"golang.org/x/crypto/ed25519"
+	"golang.org/x/crypto/ssh/internal/bcrypt_pbkdf"
 )
 
 // These constants represent the algorithm names for key types supported by this
 // package.
 const (
-	KeyAlgoRSA      = "ssh-rsa"
-	KeyAlgoDSA      = "ssh-dss"
-	KeyAlgoECDSA256 = "ecdsa-sha2-nistp256"
-	KeyAlgoECDSA384 = "ecdsa-sha2-nistp384"
-	KeyAlgoECDSA521 = "ecdsa-sha2-nistp521"
-	KeyAlgoED25519  = "ssh-ed25519"
+	KeyAlgoRSA        = "ssh-rsa"
+	KeyAlgoDSA        = "ssh-dss"
+	KeyAlgoECDSA256   = "ecdsa-sha2-nistp256"
+	KeyAlgoSKECDSA256 = "sk-ecdsa-sha2-nistp256@openssh.com"
+	KeyAlgoECDSA384   = "ecdsa-sha2-nistp384"
+	KeyAlgoECDSA521   = "ecdsa-sha2-nistp521"
+	KeyAlgoED25519    = "ssh-ed25519"
+	KeyAlgoSKED25519  = "sk-ssh-ed25519@openssh.com"
 )
 
 // These constants represent non-default signature algorithms that are supported
@@ -58,9 +63,13 @@ func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err err
 		return parseDSA(in)
 	case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521:
 		return parseECDSA(in)
+	case KeyAlgoSKECDSA256:
+		return parseSKECDSA(in)
 	case KeyAlgoED25519:
 		return parseED25519(in)
-	case CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01:
+	case KeyAlgoSKED25519:
+		return parseSKEd25519(in)
+	case CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoSKECDSA256v01, CertAlgoED25519v01, CertAlgoSKED25519v01:
 		cert, err := parseCert(in, certToPrivAlgo(algo))
 		if err != nil {
 			return nil, nil, err
@@ -553,9 +562,11 @@ func parseED25519(in []byte) (out PublicKey, rest []byte, err error) {
 		return nil, nil, err
 	}
 
-	key := ed25519.PublicKey(w.KeyBytes)
+	if l := len(w.KeyBytes); l != ed25519.PublicKeySize {
+		return nil, nil, fmt.Errorf("invalid size %d for Ed25519 public key", l)
+	}
 
-	return (ed25519PublicKey)(key), w.Rest, nil
+	return ed25519PublicKey(w.KeyBytes), w.Rest, nil
 }
 
 func (k ed25519PublicKey) Marshal() []byte {
@@ -573,9 +584,11 @@ func (k ed25519PublicKey) Verify(b []byte, sig *Signature) error {
 	if sig.Format != k.Type() {
 		return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type())
 	}
+	if l := len(k); l != ed25519.PublicKeySize {
+		return fmt.Errorf("ssh: invalid size %d for Ed25519 public key", l)
+	}
 
-	edKey := (ed25519.PublicKey)(k)
-	if ok := ed25519.Verify(edKey, b, sig.Blob); !ok {
+	if ok := ed25519.Verify(ed25519.PublicKey(k), b, sig.Blob); !ok {
 		return errors.New("ssh: signature did not verify")
 	}
 
@@ -685,6 +698,224 @@ func (k *ecdsaPublicKey) CryptoPublicKey() crypto.PublicKey {
 	return (*ecdsa.PublicKey)(k)
 }
 
+// skFields holds the additional fields present in U2F/FIDO2 signatures.
+// See openssh/PROTOCOL.u2f 'SSH U2F Signatures' for details.
+type skFields struct {
+	// Flags contains U2F/FIDO2 flags such as 'user present'
+	Flags byte
+	// Counter is a monotonic signature counter which can be
+	// used to detect concurrent use of a private key, should
+	// it be extracted from hardware.
+	Counter uint32
+}
+
+type skECDSAPublicKey struct {
+	// application is a URL-like string, typically "ssh:" for SSH.
+	// see openssh/PROTOCOL.u2f for details.
+	application string
+	ecdsa.PublicKey
+}
+
+func (k *skECDSAPublicKey) Type() string {
+	return KeyAlgoSKECDSA256
+}
+
+func (k *skECDSAPublicKey) nistID() string {
+	return "nistp256"
+}
+
+func parseSKECDSA(in []byte) (out PublicKey, rest []byte, err error) {
+	var w struct {
+		Curve       string
+		KeyBytes    []byte
+		Application string
+		Rest        []byte `ssh:"rest"`
+	}
+
+	if err := Unmarshal(in, &w); err != nil {
+		return nil, nil, err
+	}
+
+	key := new(skECDSAPublicKey)
+	key.application = w.Application
+
+	if w.Curve != "nistp256" {
+		return nil, nil, errors.New("ssh: unsupported curve")
+	}
+	key.Curve = elliptic.P256()
+
+	key.X, key.Y = elliptic.Unmarshal(key.Curve, w.KeyBytes)
+	if key.X == nil || key.Y == nil {
+		return nil, nil, errors.New("ssh: invalid curve point")
+	}
+
+	return key, w.Rest, nil
+}
+
+func (k *skECDSAPublicKey) Marshal() []byte {
+	// See RFC 5656, section 3.1.
+	keyBytes := elliptic.Marshal(k.Curve, k.X, k.Y)
+	w := struct {
+		Name        string
+		ID          string
+		Key         []byte
+		Application string
+	}{
+		k.Type(),
+		k.nistID(),
+		keyBytes,
+		k.application,
+	}
+
+	return Marshal(&w)
+}
+
+func (k *skECDSAPublicKey) Verify(data []byte, sig *Signature) error {
+	if sig.Format != k.Type() {
+		return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type())
+	}
+
+	h := ecHash(k.Curve).New()
+	h.Write([]byte(k.application))
+	appDigest := h.Sum(nil)
+
+	h.Reset()
+	h.Write(data)
+	dataDigest := h.Sum(nil)
+
+	var ecSig struct {
+		R *big.Int
+		S *big.Int
+	}
+	if err := Unmarshal(sig.Blob, &ecSig); err != nil {
+		return err
+	}
+
+	var skf skFields
+	if err := Unmarshal(sig.Rest, &skf); err != nil {
+		return err
+	}
+
+	blob := struct {
+		ApplicationDigest []byte `ssh:"rest"`
+		Flags             byte
+		Counter           uint32
+		MessageDigest     []byte `ssh:"rest"`
+	}{
+		appDigest,
+		skf.Flags,
+		skf.Counter,
+		dataDigest,
+	}
+
+	original := Marshal(blob)
+
+	h.Reset()
+	h.Write(original)
+	digest := h.Sum(nil)
+
+	if ecdsa.Verify((*ecdsa.PublicKey)(&k.PublicKey), digest, ecSig.R, ecSig.S) {
+		return nil
+	}
+	return errors.New("ssh: signature did not verify")
+}
+
+type skEd25519PublicKey struct {
+	// application is a URL-like string, typically "ssh:" for SSH.
+	// see openssh/PROTOCOL.u2f for details.
+	application string
+	ed25519.PublicKey
+}
+
+func (k *skEd25519PublicKey) Type() string {
+	return KeyAlgoSKED25519
+}
+
+func parseSKEd25519(in []byte) (out PublicKey, rest []byte, err error) {
+	var w struct {
+		KeyBytes    []byte
+		Application string
+		Rest        []byte `ssh:"rest"`
+	}
+
+	if err := Unmarshal(in, &w); err != nil {
+		return nil, nil, err
+	}
+
+	if l := len(w.KeyBytes); l != ed25519.PublicKeySize {
+		return nil, nil, fmt.Errorf("invalid size %d for Ed25519 public key", l)
+	}
+
+	key := new(skEd25519PublicKey)
+	key.application = w.Application
+	key.PublicKey = ed25519.PublicKey(w.KeyBytes)
+
+	return key, w.Rest, nil
+}
+
+func (k *skEd25519PublicKey) Marshal() []byte {
+	w := struct {
+		Name        string
+		KeyBytes    []byte
+		Application string
+	}{
+		KeyAlgoSKED25519,
+		[]byte(k.PublicKey),
+		k.application,
+	}
+	return Marshal(&w)
+}
+
+func (k *skEd25519PublicKey) Verify(data []byte, sig *Signature) error {
+	if sig.Format != k.Type() {
+		return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type())
+	}
+	if l := len(k.PublicKey); l != ed25519.PublicKeySize {
+		return fmt.Errorf("invalid size %d for Ed25519 public key", l)
+	}
+
+	h := sha256.New()
+	h.Write([]byte(k.application))
+	appDigest := h.Sum(nil)
+
+	h.Reset()
+	h.Write(data)
+	dataDigest := h.Sum(nil)
+
+	var edSig struct {
+		Signature []byte `ssh:"rest"`
+	}
+
+	if err := Unmarshal(sig.Blob, &edSig); err != nil {
+		return err
+	}
+
+	var skf skFields
+	if err := Unmarshal(sig.Rest, &skf); err != nil {
+		return err
+	}
+
+	blob := struct {
+		ApplicationDigest []byte `ssh:"rest"`
+		Flags             byte
+		Counter           uint32
+		MessageDigest     []byte `ssh:"rest"`
+	}{
+		appDigest,
+		skf.Flags,
+		skf.Counter,
+		dataDigest,
+	}
+
+	original := Marshal(blob)
+
+	if ok := ed25519.Verify(k.PublicKey, original, edSig.Signature); !ok {
+		return errors.New("ssh: signature did not verify")
+	}
+
+	return nil
+}
+
 // NewSignerFromKey takes an *rsa.PrivateKey, *dsa.PrivateKey,
 // *ecdsa.PrivateKey or any other crypto.Signer and returns a
 // corresponding Signer instance. ECDSA keys must use P-256, P-384 or
@@ -830,14 +1061,18 @@ func NewPublicKey(key interface{}) (PublicKey, error) {
 	case *dsa.PublicKey:
 		return (*dsaPublicKey)(key), nil
 	case ed25519.PublicKey:
-		return (ed25519PublicKey)(key), nil
+		if l := len(key); l != ed25519.PublicKeySize {
+			return nil, fmt.Errorf("ssh: invalid size %d for Ed25519 public key", l)
+		}
+		return ed25519PublicKey(key), nil
 	default:
 		return nil, fmt.Errorf("ssh: unsupported key type %T", key)
 	}
 }
 
 // ParsePrivateKey returns a Signer from a PEM encoded private key. It supports
-// the same keys as ParseRawPrivateKey.
+// the same keys as ParseRawPrivateKey. If the private key is encrypted, it
+// will return a PassphraseMissingError.
 func ParsePrivateKey(pemBytes []byte) (Signer, error) {
 	key, err := ParseRawPrivateKey(pemBytes)
 	if err != nil {
@@ -850,8 +1085,8 @@ func ParsePrivateKey(pemBytes []byte) (Signer, error) {
 // ParsePrivateKeyWithPassphrase returns a Signer from a PEM encoded private
 // key and passphrase. It supports the same keys as
 // ParseRawPrivateKeyWithPassphrase.
-func ParsePrivateKeyWithPassphrase(pemBytes, passPhrase []byte) (Signer, error) {
-	key, err := ParseRawPrivateKeyWithPassphrase(pemBytes, passPhrase)
+func ParsePrivateKeyWithPassphrase(pemBytes, passphrase []byte) (Signer, error) {
+	key, err := ParseRawPrivateKeyWithPassphrase(pemBytes, passphrase)
 	if err != nil {
 		return nil, err
 	}
@@ -867,8 +1102,21 @@ func encryptedBlock(block *pem.Block) bool {
 	return strings.Contains(block.Headers["Proc-Type"], "ENCRYPTED")
 }
 
+// A PassphraseMissingError indicates that parsing this private key requires a
+// passphrase. Use ParsePrivateKeyWithPassphrase.
+type PassphraseMissingError struct {
+	// PublicKey will be set if the private key format includes an unencrypted
+	// public key along with the encrypted private key.
+	PublicKey PublicKey
+}
+
+func (*PassphraseMissingError) Error() string {
+	return "ssh: this private key is passphrase protected"
+}
+
 // ParseRawPrivateKey returns a private key from a PEM encoded private key. It
-// supports RSA (PKCS#1), PKCS#8, DSA (OpenSSL), and ECDSA private keys.
+// supports RSA (PKCS#1), PKCS#8, DSA (OpenSSL), and ECDSA private keys. If the
+// private key is encrypted, it will return a PassphraseMissingError.
 func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) {
 	block, _ := pem.Decode(pemBytes)
 	if block == nil {
@@ -876,7 +1124,7 @@ func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) {
 	}
 
 	if encryptedBlock(block) {
-		return nil, errors.New("ssh: cannot decode encrypted private keys")
+		return nil, &PassphraseMissingError{}
 	}
 
 	switch block.Type {
@@ -890,33 +1138,35 @@ func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) {
 	case "DSA PRIVATE KEY":
 		return ParseDSAPrivateKey(block.Bytes)
 	case "OPENSSH PRIVATE KEY":
-		return parseOpenSSHPrivateKey(block.Bytes)
+		return parseOpenSSHPrivateKey(block.Bytes, unencryptedOpenSSHKey)
 	default:
 		return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type)
 	}
 }
 
 // ParseRawPrivateKeyWithPassphrase returns a private key decrypted with
-// passphrase from a PEM encoded private key. If wrong passphrase, return
-// x509.IncorrectPasswordError.
-func ParseRawPrivateKeyWithPassphrase(pemBytes, passPhrase []byte) (interface{}, error) {
+// passphrase from a PEM encoded private key. If the passphrase is wrong, it
+// will return x509.IncorrectPasswordError.
+func ParseRawPrivateKeyWithPassphrase(pemBytes, passphrase []byte) (interface{}, error) {
 	block, _ := pem.Decode(pemBytes)
 	if block == nil {
 		return nil, errors.New("ssh: no key found")
 	}
-	buf := block.Bytes
 
-	if encryptedBlock(block) {
-		if x509.IsEncryptedPEMBlock(block) {
-			var err error
-			buf, err = x509.DecryptPEMBlock(block, passPhrase)
-			if err != nil {
-				if err == x509.IncorrectPasswordError {
-					return nil, err
-				}
-				return nil, fmt.Errorf("ssh: cannot decode encrypted private keys: %v", err)
-			}
+	if block.Type == "OPENSSH PRIVATE KEY" {
+		return parseOpenSSHPrivateKey(block.Bytes, passphraseProtectedOpenSSHKey(passphrase))
+	}
+
+	if !encryptedBlock(block) || !x509.IsEncryptedPEMBlock(block) {
+		return nil, errors.New("ssh: not an encrypted key")
+	}
+
+	buf, err := x509.DecryptPEMBlock(block, passphrase)
+	if err != nil {
+		if err == x509.IncorrectPasswordError {
+			return nil, err
 		}
+		return nil, fmt.Errorf("ssh: cannot decode encrypted private keys: %v", err)
 	}
 
 	switch block.Type {
@@ -926,8 +1176,6 @@ func ParseRawPrivateKeyWithPassphrase(pemBytes, passPhrase []byte) (interface{},
 		return x509.ParseECPrivateKey(buf)
 	case "DSA PRIVATE KEY":
 		return ParseDSAPrivateKey(buf)
-	case "OPENSSH PRIVATE KEY":
-		return parseOpenSSHPrivateKey(buf)
 	default:
 		return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type)
 	}
@@ -965,9 +1213,60 @@ func ParseDSAPrivateKey(der []byte) (*dsa.PrivateKey, error) {
 	}, nil
 }
 
-// Implemented based on the documentation at
-// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key
-func parseOpenSSHPrivateKey(key []byte) (crypto.PrivateKey, error) {
+func unencryptedOpenSSHKey(cipherName, kdfName, kdfOpts string, privKeyBlock []byte) ([]byte, error) {
+	if kdfName != "none" || cipherName != "none" {
+		return nil, &PassphraseMissingError{}
+	}
+	if kdfOpts != "" {
+		return nil, errors.New("ssh: invalid openssh private key")
+	}
+	return privKeyBlock, nil
+}
+
+func passphraseProtectedOpenSSHKey(passphrase []byte) openSSHDecryptFunc {
+	return func(cipherName, kdfName, kdfOpts string, privKeyBlock []byte) ([]byte, error) {
+		if kdfName == "none" || cipherName == "none" {
+			return nil, errors.New("ssh: key is not password protected")
+		}
+		if kdfName != "bcrypt" {
+			return nil, fmt.Errorf("ssh: unknown KDF %q, only supports %q", kdfName, "bcrypt")
+		}
+
+		var opts struct {
+			Salt   string
+			Rounds uint32
+		}
+		if err := Unmarshal([]byte(kdfOpts), &opts); err != nil {
+			return nil, err
+		}
+
+		k, err := bcrypt_pbkdf.Key(passphrase, []byte(opts.Salt), int(opts.Rounds), 32+16)
+		if err != nil {
+			return nil, err
+		}
+		key, iv := k[:32], k[32:]
+
+		if cipherName != "aes256-ctr" {
+			return nil, fmt.Errorf("ssh: unknown cipher %q, only supports %q", cipherName, "aes256-ctr")
+		}
+		c, err := aes.NewCipher(key)
+		if err != nil {
+			return nil, err
+		}
+		ctr := cipher.NewCTR(c, iv)
+		ctr.XORKeyStream(privKeyBlock, privKeyBlock)
+
+		return privKeyBlock, nil
+	}
+}
+
+type openSSHDecryptFunc func(CipherName, KdfName, KdfOpts string, PrivKeyBlock []byte) ([]byte, error)
+
+// parseOpenSSHPrivateKey parses an OpenSSH private key, using the decrypt
+// function to unwrap the encrypted portion. unencryptedOpenSSHKey can be used
+// as the decrypt function to parse an unencrypted private key. See
+// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key.
+func parseOpenSSHPrivateKey(key []byte, decrypt openSSHDecryptFunc) (crypto.PrivateKey, error) {
 	const magic = "openssh-key-v1\x00"
 	if len(key) < len(magic) || string(key[:len(magic)]) != magic {
 		return nil, errors.New("ssh: invalid openssh private key format")
@@ -986,9 +1285,22 @@ func parseOpenSSHPrivateKey(key []byte) (crypto.PrivateKey, error) {
 	if err := Unmarshal(remaining, &w); err != nil {
 		return nil, err
 	}
+	if w.NumKeys != 1 {
+		// We only support single key files, and so does OpenSSH.
+		// https://github.com/openssh/openssh-portable/blob/4103a3ec7/sshkey.c#L4171
+		return nil, errors.New("ssh: multi-key files are not supported")
+	}
 
-	if w.KdfName != "none" || w.CipherName != "none" {
-		return nil, errors.New("ssh: cannot decode encrypted private keys")
+	privKeyBlock, err := decrypt(w.CipherName, w.KdfName, w.KdfOpts, w.PrivKeyBlock)
+	if err != nil {
+		if err, ok := err.(*PassphraseMissingError); ok {
+			pub, errPub := ParsePublicKey(w.PubKey)
+			if errPub != nil {
+				return nil, fmt.Errorf("ssh: failed to parse embedded public key: %v", errPub)
+			}
+			err.PublicKey = pub
+		}
+		return nil, err
 	}
 
 	pk1 := struct {
@@ -998,15 +1310,13 @@ func parseOpenSSHPrivateKey(key []byte) (crypto.PrivateKey, error) {
 		Rest    []byte `ssh:"rest"`
 	}{}
 
-	if err := Unmarshal(w.PrivKeyBlock, &pk1); err != nil {
-		return nil, err
+	if err := Unmarshal(privKeyBlock, &pk1); err != nil || pk1.Check1 != pk1.Check2 {
+		if w.CipherName != "none" {
+			return nil, x509.IncorrectPasswordError
+		}
+		return nil, errors.New("ssh: malformed OpenSSH key")
 	}
 
-	if pk1.Check1 != pk1.Check2 {
-		return nil, errors.New("ssh: checkint mismatch")
-	}
-
-	// we only handle ed25519 and rsa keys currently
 	switch pk1.Keytype {
 	case KeyAlgoRSA:
 		// https://github.com/openssh/openssh-portable/blob/master/sshkey.c#L2760-L2773
@@ -1025,10 +1335,8 @@ func parseOpenSSHPrivateKey(key []byte) (crypto.PrivateKey, error) {
 			return nil, err
 		}
 
-		for i, b := range key.Pad {
-			if int(b) != i+1 {
-				return nil, errors.New("ssh: padding not as expected")
-			}
+		if err := checkOpenSSHKeyPadding(key.Pad); err != nil {
+			return nil, err
 		}
 
 		pk := &rsa.PrivateKey{
@@ -1063,20 +1371,78 @@ func parseOpenSSHPrivateKey(key []byte) (crypto.PrivateKey, error) {
 			return nil, errors.New("ssh: private key unexpected length")
 		}
 
-		for i, b := range key.Pad {
-			if int(b) != i+1 {
-				return nil, errors.New("ssh: padding not as expected")
-			}
+		if err := checkOpenSSHKeyPadding(key.Pad); err != nil {
+			return nil, err
 		}
 
 		pk := ed25519.PrivateKey(make([]byte, ed25519.PrivateKeySize))
 		copy(pk, key.Priv)
 		return &pk, nil
+	case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521:
+		key := struct {
+			Curve   string
+			Pub     []byte
+			D       *big.Int
+			Comment string
+			Pad     []byte `ssh:"rest"`
+		}{}
+
+		if err := Unmarshal(pk1.Rest, &key); err != nil {
+			return nil, err
+		}
+
+		if err := checkOpenSSHKeyPadding(key.Pad); err != nil {
+			return nil, err
+		}
+
+		var curve elliptic.Curve
+		switch key.Curve {
+		case "nistp256":
+			curve = elliptic.P256()
+		case "nistp384":
+			curve = elliptic.P384()
+		case "nistp521":
+			curve = elliptic.P521()
+		default:
+			return nil, errors.New("ssh: unhandled elliptic curve: " + key.Curve)
+		}
+
+		X, Y := elliptic.Unmarshal(curve, key.Pub)
+		if X == nil || Y == nil {
+			return nil, errors.New("ssh: failed to unmarshal public key")
+		}
+
+		if key.D.Cmp(curve.Params().N) >= 0 {
+			return nil, errors.New("ssh: scalar is out of range")
+		}
+
+		x, y := curve.ScalarBaseMult(key.D.Bytes())
+		if x.Cmp(X) != 0 || y.Cmp(Y) != 0 {
+			return nil, errors.New("ssh: public key does not match private key")
+		}
+
+		return &ecdsa.PrivateKey{
+			PublicKey: ecdsa.PublicKey{
+				Curve: curve,
+				X:     X,
+				Y:     Y,
+			},
+			D: key.D,
+		}, nil
 	default:
 		return nil, errors.New("ssh: unhandled key type")
 	}
 }
 
+func checkOpenSSHKeyPadding(pad []byte) error {
+	for i, b := range pad {
+		if int(b) != i+1 {
+			return errors.New("ssh: padding not as expected")
+		}
+	}
+	return nil
+}
+
 // FingerprintLegacyMD5 returns the user presentation of the key's
 // fingerprint as described by RFC 4716 section 4.
 func FingerprintLegacyMD5(pubKey PublicKey) string {
diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go
index 7a5a1d7a..7d42a8c8 100644
--- a/vendor/golang.org/x/crypto/ssh/server.go
+++ b/vendor/golang.org/x/crypto/ssh/server.go
@@ -284,8 +284,8 @@ func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error)
 
 func isAcceptableAlgo(algo string) bool {
 	switch algo {
-	case KeyAlgoRSA, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoED25519,
-		CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01:
+	case KeyAlgoRSA, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoSKECDSA256, KeyAlgoED25519, KeyAlgoSKED25519,
+		CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoSKECDSA256v01, CertAlgoED25519v01, CertAlgoSKED25519v01:
 		return true
 	}
 	return false
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/terminal.go b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go
index 2f04ee5b..d1b4fca3 100644
--- a/vendor/golang.org/x/crypto/ssh/terminal/terminal.go
+++ b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go
@@ -7,6 +7,7 @@ package terminal
 import (
 	"bytes"
 	"io"
+	"runtime"
 	"strconv"
 	"sync"
 	"unicode/utf8"
@@ -939,6 +940,8 @@ func (s *stRingBuffer) NthPreviousEntry(n int) (value string, ok bool) {
 // readPasswordLine reads from reader until it finds \n or io.EOF.
 // The slice returned does not include the \n.
 // readPasswordLine also ignores any \r it finds.
+// Windows uses \r as end of line. So, on Windows, readPasswordLine
+// reads until it finds \r and ignores any \n it finds during processing.
 func readPasswordLine(reader io.Reader) ([]byte, error) {
 	var buf [1]byte
 	var ret []byte
@@ -947,10 +950,20 @@ func readPasswordLine(reader io.Reader) ([]byte, error) {
 		n, err := reader.Read(buf[:])
 		if n > 0 {
 			switch buf[0] {
+			case '\b':
+				if len(ret) > 0 {
+					ret = ret[:len(ret)-1]
+				}
 			case '\n':
-				return ret, nil
+				if runtime.GOOS != "windows" {
+					return ret, nil
+				}
+				// otherwise ignore \n
 			case '\r':
-				// remove \r from passwords on Windows
+				if runtime.GOOS == "windows" {
+					return ret, nil
+				}
+				// otherwise ignore \r
 			default:
 				ret = append(ret, buf[0])
 			}
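
The readPasswordLine change above makes '\r' the terminator on Windows and '\n' elsewhere, and honours backspace; callers keep using ReadPassword unchanged. A small illustrative sketch (the prompt and fd choice are not part of this patch):

package main

import (
	"fmt"
	"os"

	"golang.org/x/crypto/ssh/terminal"
)

func main() {
	fmt.Print("Password: ")
	pw, err := terminal.ReadPassword(int(os.Stdin.Fd()))
	fmt.Println()
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %d bytes\n", len(pw))
}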
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go
index 5cfdf8f3..f614e9cb 100644
--- a/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go
+++ b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go
@@ -85,8 +85,8 @@ func ReadPassword(fd int) ([]byte, error) {
 	}
 	old := st
 
-	st &^= (windows.ENABLE_ECHO_INPUT)
-	st |= (windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT)
+	st &^= (windows.ENABLE_ECHO_INPUT | windows.ENABLE_LINE_INPUT)
+	st |= (windows.ENABLE_PROCESSED_OUTPUT | windows.ENABLE_PROCESSED_INPUT)
 	if err := windows.SetConsoleMode(windows.Handle(fd), st); err != nil {
 		return nil, err
 	}
diff --git a/vendor/golang.org/x/text/encoding/encoding.go b/vendor/golang.org/x/text/encoding/encoding.go
new file mode 100644
index 00000000..a0bd7cd4
--- /dev/null
+++ b/vendor/golang.org/x/text/encoding/encoding.go
@@ -0,0 +1,335 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package encoding defines an interface for character encodings, such as Shift
+// JIS and Windows 1252, that can convert to and from UTF-8.
+//
+// Encoding implementations are provided in other packages, such as
+// golang.org/x/text/encoding/charmap and
+// golang.org/x/text/encoding/japanese.
+package encoding // import "golang.org/x/text/encoding"
+
+import (
+	"errors"
+	"io"
+	"strconv"
+	"unicode/utf8"
+
+	"golang.org/x/text/encoding/internal/identifier"
+	"golang.org/x/text/transform"
+)
+
+// TODO:
+// - There seems to be some inconsistency in when decoders return errors
+//   and when not. Also documentation seems to suggest they shouldn't return
+//   errors at all (except for UTF-16).
+// - Encoders seem to rely on or at least benefit from the input being in NFC
+//   normal form. Perhaps add an example how users could prepare their output.
+
+// Encoding is a character set encoding that can be transformed to and from
+// UTF-8.
+type Encoding interface {
+	// NewDecoder returns a Decoder.
+	NewDecoder() *Decoder
+
+	// NewEncoder returns an Encoder.
+	NewEncoder() *Encoder
+}
+
+// A Decoder converts bytes to UTF-8. It implements transform.Transformer.
+//
+// Transforming source bytes that are not of that encoding will not result in an
+// error per se. Each byte that cannot be transcoded will be represented in the
+// output by the UTF-8 encoding of '\uFFFD', the replacement rune.
+type Decoder struct {
+	transform.Transformer
+
+	// This forces external creators of Decoders to use names in struct
+	// initializers, allowing for future extendibility without having to break
+	// code.
+	_ struct{}
+}
+
+// Bytes converts the given encoded bytes to UTF-8. It returns the converted
+// bytes or nil, err if any error occurred.
+func (d *Decoder) Bytes(b []byte) ([]byte, error) {
+	b, _, err := transform.Bytes(d, b)
+	if err != nil {
+		return nil, err
+	}
+	return b, nil
+}
+
+// String converts the given encoded string to UTF-8. It returns the converted
+// string or "", err if any error occurred.
+func (d *Decoder) String(s string) (string, error) {
+	s, _, err := transform.String(d, s)
+	if err != nil {
+		return "", err
+	}
+	return s, nil
+}
+
+// Reader wraps another Reader to decode its bytes.
+//
+// The Decoder may not be used for any other operation as long as the returned
+// Reader is in use.
+func (d *Decoder) Reader(r io.Reader) io.Reader {
+	return transform.NewReader(r, d)
+}
+
+// An Encoder converts bytes from UTF-8. It implements transform.Transformer.
+//
+// Each rune that cannot be transcoded will result in an error. In this case,
+// the transform will consume all source bytes up to, but not including, the offending
+// rune. Source bytes that are not valid UTF-8 will be replaced by
+// `\uFFFD`. To return early with an error instead, use transform.Chain to
+// preprocess the data with a UTF8Validator.
+type Encoder struct {
+	transform.Transformer
+
+	// This forces external creators of Encoders to use names in struct
+	// initializers, allowing for future extendibility without having to break
+	// code.
+	_ struct{}
+}
+
+// Bytes converts bytes from UTF-8. It returns the converted bytes or nil, err if
+// any error occurred.
+func (e *Encoder) Bytes(b []byte) ([]byte, error) {
+	b, _, err := transform.Bytes(e, b)
+	if err != nil {
+		return nil, err
+	}
+	return b, nil
+}
+
+// String converts a string from UTF-8. It returns the converted string or
+// "", err if any error occurred.
+func (e *Encoder) String(s string) (string, error) {
+	s, _, err := transform.String(e, s)
+	if err != nil {
+		return "", err
+	}
+	return s, nil
+}
+
+// Writer wraps another Writer to encode its UTF-8 output.
+//
+// The Encoder may not be used for any other operation as long as the returned
+// Writer is in use.
+func (e *Encoder) Writer(w io.Writer) io.Writer {
+	return transform.NewWriter(w, e)
+}
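+
+// The Decoder and Encoder wrappers above are normally obtained from a concrete
+// Encoding such as those in golang.org/x/text/encoding/charmap. A hedged sketch,
+// assuming that package is also vendored (the byte sequence is illustrative):
+//
+//	package main
+//
+//	import (
+//		"fmt"
+//
+//		"golang.org/x/text/encoding/charmap"
+//	)
+//
+//	func main() {
+//		win1252 := []byte{'c', 'a', 'f', 0xe9} // "café" in Windows-1252
+//		s, err := charmap.Windows1252.NewDecoder().String(string(win1252))
+//		if err != nil {
+//			panic(err)
+//		}
+//		fmt.Println(s) // café
+//
+//		back, err := charmap.Windows1252.NewEncoder().String(s)
+//		if err != nil {
+//			panic(err)
+//		}
+//		fmt.Printf("%% x\n", back) // 63 61 66 e9
+//	}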
+
+// ASCIISub is the ASCII substitute character, as recommended by
+// https://unicode.org/reports/tr36/#Text_Comparison
+const ASCIISub = '\x1a'
+
+// Nop is the nop encoding. Its transformed bytes are the same as the source
+// bytes; it does not replace invalid UTF-8 sequences.
+var Nop Encoding = nop{}
+
+type nop struct{}
+
+func (nop) NewDecoder() *Decoder {
+	return &Decoder{Transformer: transform.Nop}
+}
+func (nop) NewEncoder() *Encoder {
+	return &Encoder{Transformer: transform.Nop}
+}
+
+// Replacement is the replacement encoding. Decoding from the replacement
+// encoding yields a single '\uFFFD' replacement rune. Encoding from UTF-8 to
+// the replacement encoding yields the same as the source bytes except that
+// invalid UTF-8 is converted to '\uFFFD'.
+//
+// It is defined at http://encoding.spec.whatwg.org/#replacement
+var Replacement Encoding = replacement{}
+
+type replacement struct{}
+
+func (replacement) NewDecoder() *Decoder {
+	return &Decoder{Transformer: replacementDecoder{}}
+}
+
+func (replacement) NewEncoder() *Encoder {
+	return &Encoder{Transformer: replacementEncoder{}}
+}
+
+func (replacement) ID() (mib identifier.MIB, other string) {
+	return identifier.Replacement, ""
+}
+
+type replacementDecoder struct{ transform.NopResetter }
+
+func (replacementDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+	if len(dst) < 3 {
+		return 0, 0, transform.ErrShortDst
+	}
+	if atEOF {
+		const fffd = "\ufffd"
+		dst[0] = fffd[0]
+		dst[1] = fffd[1]
+		dst[2] = fffd[2]
+		nDst = 3
+	}
+	return nDst, len(src), nil
+}
+
+type replacementEncoder struct{ transform.NopResetter }
+
+func (replacementEncoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+	r, size := rune(0), 0
+
+	for ; nSrc < len(src); nSrc += size {
+		r = rune(src[nSrc])
+
+		// Decode a 1-byte rune.
+		if r < utf8.RuneSelf {
+			size = 1
+
+		} else {
+			// Decode a multi-byte rune.
+			r, size = utf8.DecodeRune(src[nSrc:])
+			if size == 1 {
+				// All valid runes of size 1 (those below utf8.RuneSelf) were
+				// handled above. We have invalid UTF-8 or we haven't seen the
+				// full character yet.
+				if !atEOF && !utf8.FullRune(src[nSrc:]) {
+					err = transform.ErrShortSrc
+					break
+				}
+				r = '\ufffd'
+			}
+		}
+
+		if nDst+utf8.RuneLen(r) > len(dst) {
+			err = transform.ErrShortDst
+			break
+		}
+		nDst += utf8.EncodeRune(dst[nDst:], r)
+	}
+	return nDst, nSrc, err
+}
+
+// HTMLEscapeUnsupported wraps encoders to replace source runes outside the
+// repertoire of the destination encoding with HTML escape sequences.
+//
+// This wrapper exists to comply with URL and HTML forms requiring a
+// non-terminating legacy encoder. The produced sequences may lead to data
+// loss as they are indistinguishable from legitimate input. To avoid this
+// issue, use UTF-8 encodings whenever possible.
+func HTMLEscapeUnsupported(e *Encoder) *Encoder {
+	return &Encoder{Transformer: &errorHandler{e, errorToHTML}}
+}
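+
+// Sketch, assuming a charmap encoder (whose unsupported-rune errors carry the
+// repertoire information these wrappers act on): '☺' is outside Windows-1252,
+// so it is emitted as a numeric character reference rather than an error.
+//
+//	enc := HTMLEscapeUnsupported(charmap.Windows1252.NewEncoder())
+//	s, err := enc.String("smile ☺")
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = s // "smile &#9786;"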
+
+// ReplaceUnsupported wraps encoders to replace source runes outside the
+// repertoire of the destination encoding with an encoding-specific
+// replacement.
+//
+// This wrapper is only provided for backwards compatibility and legacy
+// handling. Its use is strongly discouraged. Use UTF-8 whenever possible.
+func ReplaceUnsupported(e *Encoder) *Encoder {
+	return &Encoder{Transformer: &errorHandler{e, errorToReplacement}}
+}
+
+type errorHandler struct {
+	*Encoder
+	handler func(dst []byte, r rune, err repertoireError) (n int, ok bool)
+}
+
+// TODO: consider making this error public in some form.
+type repertoireError interface {
+	Replacement() byte
+}
+
+func (h errorHandler) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+	nDst, nSrc, err = h.Transformer.Transform(dst, src, atEOF)
+	for err != nil {
+		rerr, ok := err.(repertoireError)
+		if !ok {
+			return nDst, nSrc, err
+		}
+		r, sz := utf8.DecodeRune(src[nSrc:])
+		n, ok := h.handler(dst[nDst:], r, rerr)
+		if !ok {
+			return nDst, nSrc, transform.ErrShortDst
+		}
+		err = nil
+		nDst += n
+		if nSrc += sz; nSrc < len(src) {
+			var dn, sn int
+			dn, sn, err = h.Transformer.Transform(dst[nDst:], src[nSrc:], atEOF)
+			nDst += dn
+			nSrc += sn
+		}
+	}
+	return nDst, nSrc, err
+}
+
+func errorToHTML(dst []byte, r rune, err repertoireError) (n int, ok bool) {
+	buf := [8]byte{}
+	b := strconv.AppendUint(buf[:0], uint64(r), 10)
+	if n = len(b) + len("&#;"); n >= len(dst) {
+		return 0, false
+	}
+	dst[0] = '&'
+	dst[1] = '#'
+	dst[copy(dst[2:], b)+2] = ';'
+	return n, true
+}
+
+func errorToReplacement(dst []byte, r rune, err repertoireError) (n int, ok bool) {
+	if len(dst) == 0 {
+		return 0, false
+	}
+	dst[0] = err.Replacement()
+	return 1, true
+}
+
+// ErrInvalidUTF8 means that a transformer encountered invalid UTF-8.
+var ErrInvalidUTF8 = errors.New("encoding: invalid UTF-8")
+
+// UTF8Validator is a transformer that returns ErrInvalidUTF8 on the first
+// input byte that is not valid UTF-8.
+var UTF8Validator transform.Transformer = utf8Validator{}
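+
+// Sketch: validating input without changing it. The transform copies valid
+// bytes through and stops at the first invalid one.
+//
+//	if _, _, err := transform.String(UTF8Validator, "ok\xffbad"); err != nil {
+//		// err is ErrInvalidUTF8
+//	}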
+
+type utf8Validator struct{ transform.NopResetter }
+
+func (utf8Validator) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+	n := len(src)
+	if n > len(dst) {
+		n = len(dst)
+	}
+	for i := 0; i < n; {
+		if c := src[i]; c < utf8.RuneSelf {
+			dst[i] = c
+			i++
+			continue
+		}
+		_, size := utf8.DecodeRune(src[i:])
+		if size == 1 {
+			// All valid runes of size 1 (those below utf8.RuneSelf) were
+			// handled above. We have invalid UTF-8 or we haven't seen the
+			// full character yet.
+			err = ErrInvalidUTF8
+			if !atEOF && !utf8.FullRune(src[i:]) {
+				err = transform.ErrShortSrc
+			}
+			return i, i, err
+		}
+		if i+size > len(dst) {
+			return i, i, transform.ErrShortDst
+		}
+		for ; size > 0; size-- {
+			dst[i] = src[i]
+			i++
+		}
+	}
+	if len(src) > len(dst) {
+		err = transform.ErrShortDst
+	}
+	return n, n, err
+}
diff --git a/vendor/golang.org/x/text/encoding/internal/identifier/identifier.go b/vendor/golang.org/x/text/encoding/internal/identifier/identifier.go
new file mode 100644
index 00000000..5c9b85c2
--- /dev/null
+++ b/vendor/golang.org/x/text/encoding/internal/identifier/identifier.go
@@ -0,0 +1,81 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run gen.go
+
+// Package identifier defines the contract between implementations of Encoding
+// and Index by defining identifiers that uniquely identify standardized coded
+// character sets (CCS) and character encoding schemes (CES), which we will
+// together refer to as encodings, for which Encoding implementations provide
+// converters to and from UTF-8. This package is typically only of concern to
+// implementers of Indexes and Encodings.
+//
+// One part of the identifier is the MIB code, which is defined by IANA and
+// uniquely identifies a CCS or CES. Each code is associated with data that
+// references authorities, official documentation as well as aliases and MIME
+// names.
+//
+// Not all CESs are covered by the IANA registry. The "other" string that is
+// returned by ID can be used to identify other character sets or versions of
+// existing ones.
+//
+// It is recommended that each package that provides a set of Encodings provide
+// the All and Common variables to reference all supported encodings and a
+// commonly used subset. This allows Index implementations to include all
+// available encodings without explicitly referencing or knowing about them.
+package identifier
+
+// Note: this package is internal, but could be made public if there is a need
+// for writing third-party Indexes and Encodings.
+
+// References:
+// - http://source.icu-project.org/repos/icu/icu/trunk/source/data/mappings/convrtrs.txt
+// - http://www.iana.org/assignments/character-sets/character-sets.xhtml
+// - http://www.iana.org/assignments/ianacharset-mib/ianacharset-mib
+// - http://www.ietf.org/rfc/rfc2978.txt
+// - https://www.unicode.org/reports/tr22/
+// - http://www.w3.org/TR/encoding/
+// - https://encoding.spec.whatwg.org/
+// - https://encoding.spec.whatwg.org/encodings.json
+// - https://tools.ietf.org/html/rfc6657#section-5
+
+// Interface can be implemented by Encodings to define the CCS or CES for which
+// it implements conversions.
+type Interface interface {
+	// ID returns an encoding identifier. Exactly one of the mib and other
+	// values should be non-zero.
+	//
+	// In the usual case it is only necessary to indicate the MIB code. The
+	// other string can be used to specify encodings for which there is no MIB,
+	// such as "x-mac-dingbat".
+	//
+	// The other string may only contain the characters a-z, A-Z, 0-9, - and _.
+	ID() (mib MIB, other string)
+
+	// NOTE: the restrictions on the encoding are to allow extending the syntax
+	// with additional information such as versions, vendors and other variants.
+}
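+
+// A minimal sketch of an implementation; the type and the "x-my-encoding"
+// name are hypothetical. Encodings registered with IANA return their MIB
+// code, while unregistered ones return a zero MIB and a name in other.
+//
+//	type myEncoding struct{ /* ... */ }
+//
+//	func (myEncoding) ID() (mib MIB, other string) {
+//		return 0, "x-my-encoding"
+//	}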
+
+// A MIB identifies an encoding. It is derived from the IANA MIB codes and adds
+// some identifiers for some encodings that are not covered by the IANA
+// standard.
+//
+// See http://www.iana.org/assignments/ianacharset-mib.
+type MIB uint16
+
+// These additional MIB types are not defined in IANA. They are added because
+// they are common and defined within the text repo.
+const (
+	// Unofficial marks the start of encodings not registered by IANA.
+	Unofficial MIB = 10000 + iota
+
+	// Replacement is the WhatWG replacement encoding.
+	Replacement
+
+	// XUserDefined is the code for x-user-defined.
+	XUserDefined
+
+	// MacintoshCyrillic is the code for x-mac-cyrillic.
+	MacintoshCyrillic
+)
diff --git a/vendor/golang.org/x/text/encoding/internal/identifier/mib.go b/vendor/golang.org/x/text/encoding/internal/identifier/mib.go
new file mode 100644
index 00000000..fc7df1bc
--- /dev/null
+++ b/vendor/golang.org/x/text/encoding/internal/identifier/mib.go
@@ -0,0 +1,1619 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+package identifier
+
+const (
+	// ASCII is the MIB identifier with IANA name US-ASCII (MIME: US-ASCII).
+	//
+	// ANSI X3.4-1986
+	// Reference: RFC2046
+	ASCII MIB = 3
+
+	// ISOLatin1 is the MIB identifier with IANA name ISO_8859-1:1987 (MIME: ISO-8859-1).
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISOLatin1 MIB = 4
+
+	// ISOLatin2 is the MIB identifier with IANA name ISO_8859-2:1987 (MIME: ISO-8859-2).
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISOLatin2 MIB = 5
+
+	// ISOLatin3 is the MIB identifier with IANA name ISO_8859-3:1988 (MIME: ISO-8859-3).
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISOLatin3 MIB = 6
+
+	// ISOLatin4 is the MIB identifier with IANA name ISO_8859-4:1988 (MIME: ISO-8859-4).
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISOLatin4 MIB = 7
+
+	// ISOLatinCyrillic is the MIB identifier with IANA name ISO_8859-5:1988 (MIME: ISO-8859-5).
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISOLatinCyrillic MIB = 8
+
+	// ISOLatinArabic is the MIB identifier with IANA name ISO_8859-6:1987 (MIME: ISO-8859-6).
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISOLatinArabic MIB = 9
+
+	// ISOLatinGreek is the MIB identifier with IANA name ISO_8859-7:1987 (MIME: ISO-8859-7).
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1947
+	// Reference: RFC1345
+	ISOLatinGreek MIB = 10
+
+	// ISOLatinHebrew is the MIB identifier with IANA name ISO_8859-8:1988 (MIME: ISO-8859-8).
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISOLatinHebrew MIB = 11
+
+	// ISOLatin5 is the MIB identifier with IANA name ISO_8859-9:1989 (MIME: ISO-8859-9).
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISOLatin5 MIB = 12
+
+	// ISOLatin6 is the MIB identifier with IANA name ISO-8859-10 (MIME: ISO-8859-10).
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISOLatin6 MIB = 13
+
+	// ISOTextComm is the MIB identifier with IANA name ISO_6937-2-add.
+	//
+	// ISO-IR: International Register of Escape Sequences and ISO 6937-2:1983
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISOTextComm MIB = 14
+
+	// HalfWidthKatakana is the MIB identifier with IANA name JIS_X0201.
+	//
+	// JIS X 0201-1976.   One byte only, this is equivalent to
+	// JIS/Roman (similar to ASCII) plus eight-bit half-width
+	// Katakana
+	// Reference: RFC1345
+	HalfWidthKatakana MIB = 15
+
+	// JISEncoding is the MIB identifier with IANA name JIS_Encoding.
+	//
+	// JIS X 0202-1991.  Uses ISO 2022 escape sequences to
+	// shift code sets as documented in JIS X 0202-1991.
+	JISEncoding MIB = 16
+
+	// ShiftJIS is the MIB identifier with IANA name Shift_JIS (MIME: Shift_JIS).
+	//
+	// This charset is an extension of csHalfWidthKatakana by
+	// adding graphic characters in JIS X 0208.  The CCS's are
+	// JIS X0201:1997 and JIS X0208:1997.  The
+	// complete definition is shown in Appendix 1 of JIS
+	// X0208:1997.
+	// This charset can be used for the top-level media type "text".
+	ShiftJIS MIB = 17
+
+	// EUCPkdFmtJapanese is the MIB identifier with IANA name Extended_UNIX_Code_Packed_Format_for_Japanese (MIME: EUC-JP).
+	//
+	// Standardized by OSF, UNIX International, and UNIX Systems
+	// Laboratories Pacific.  Uses ISO 2022 rules to select
+	// code set 0: US-ASCII (a single 7-bit byte set)
+	// code set 1: JIS X0208-1990 (a double 8-bit byte set)
+	// restricted to A0-FF in both bytes
+	// code set 2: Half Width Katakana (a single 7-bit byte set)
+	// requiring SS2 as the character prefix
+	// code set 3: JIS X0212-1990 (a double 7-bit byte set)
+	// restricted to A0-FF in both bytes
+	// requiring SS3 as the character prefix
+	EUCPkdFmtJapanese MIB = 18
+
+	// EUCFixWidJapanese is the MIB identifier with IANA name Extended_UNIX_Code_Fixed_Width_for_Japanese.
+	//
+	// Used in Japan.  Each character is 2 octets.
+	// code set 0: US-ASCII (a single 7-bit byte set)
+	// 1st byte = 00
+	// 2nd byte = 20-7E
+	// code set 1: JIS X0208-1990 (a double 7-bit byte set)
+	// restricted  to A0-FF in both bytes
+	// code set 2: Half Width Katakana (a single 7-bit byte set)
+	// 1st byte = 00
+	// 2nd byte = A0-FF
+	// code set 3: JIS X0212-1990 (a double 7-bit byte set)
+	// restricted to A0-FF in
+	// the first byte
+	// and 21-7E in the second byte
+	EUCFixWidJapanese MIB = 19
+
+	// ISO4UnitedKingdom is the MIB identifier with IANA name BS_4730.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO4UnitedKingdom MIB = 20
+
+	// ISO11SwedishForNames is the MIB identifier with IANA name SEN_850200_C.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO11SwedishForNames MIB = 21
+
+	// ISO15Italian is the MIB identifier with IANA name IT.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO15Italian MIB = 22
+
+	// ISO17Spanish is the MIB identifier with IANA name ES.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO17Spanish MIB = 23
+
+	// ISO21German is the MIB identifier with IANA name DIN_66003.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO21German MIB = 24
+
+	// ISO60Norwegian1 is the MIB identifier with IANA name NS_4551-1.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO60Norwegian1 MIB = 25
+
+	// ISO69French is the MIB identifier with IANA name NF_Z_62-010.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO69French MIB = 26
+
+	// ISO10646UTF1 is the MIB identifier with IANA name ISO-10646-UTF-1.
+	//
+	// Universal Transfer Format (1), this is the multibyte
+	// encoding, that subsets ASCII-7. It does not have byte
+	// ordering issues.
+	ISO10646UTF1 MIB = 27
+
+	// ISO646basic1983 is the MIB identifier with IANA name ISO_646.basic:1983.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO646basic1983 MIB = 28
+
+	// INVARIANT is the MIB identifier with IANA name INVARIANT.
+	//
+	// Reference: RFC1345
+	INVARIANT MIB = 29
+
+	// ISO2IntlRefVersion is the MIB identifier with IANA name ISO_646.irv:1983.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO2IntlRefVersion MIB = 30
+
+	// NATSSEFI is the MIB identifier with IANA name NATS-SEFI.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	NATSSEFI MIB = 31
+
+	// NATSSEFIADD is the MIB identifier with IANA name NATS-SEFI-ADD.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	NATSSEFIADD MIB = 32
+
+	// NATSDANO is the MIB identifier with IANA name NATS-DANO.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	NATSDANO MIB = 33
+
+	// NATSDANOADD is the MIB identifier with IANA name NATS-DANO-ADD.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	NATSDANOADD MIB = 34
+
+	// ISO10Swedish is the MIB identifier with IANA name SEN_850200_B.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO10Swedish MIB = 35
+
+	// KSC56011987 is the MIB identifier with IANA name KS_C_5601-1987.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	KSC56011987 MIB = 36
+
+	// ISO2022KR is the MIB identifier with IANA name ISO-2022-KR (MIME: ISO-2022-KR).
+	//
+	// rfc1557 (see also KS_C_5601-1987)
+	// Reference: RFC1557
+	ISO2022KR MIB = 37
+
+	// EUCKR is the MIB identifier with IANA name EUC-KR (MIME: EUC-KR).
+	//
+	// rfc1557 (see also KS_C_5861-1992)
+	// Reference: RFC1557
+	EUCKR MIB = 38
+
+	// ISO2022JP is the MIB identifier with IANA name ISO-2022-JP (MIME: ISO-2022-JP).
+	//
+	// rfc1468 (see also rfc2237 )
+	// Reference: RFC1468
+	ISO2022JP MIB = 39
+
+	// ISO2022JP2 is the MIB identifier with IANA name ISO-2022-JP-2 (MIME: ISO-2022-JP-2).
+	//
+	// rfc1554
+	// Reference: RFC1554
+	ISO2022JP2 MIB = 40
+
+	// ISO13JISC6220jp is the MIB identifier with IANA name JIS_C6220-1969-jp.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO13JISC6220jp MIB = 41
+
+	// ISO14JISC6220ro is the MIB identifier with IANA name JIS_C6220-1969-ro.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO14JISC6220ro MIB = 42
+
+	// ISO16Portuguese is the MIB identifier with IANA name PT.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO16Portuguese MIB = 43
+
+	// ISO18Greek7Old is the MIB identifier with IANA name greek7-old.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO18Greek7Old MIB = 44
+
+	// ISO19LatinGreek is the MIB identifier with IANA name latin-greek.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO19LatinGreek MIB = 45
+
+	// ISO25French is the MIB identifier with IANA name NF_Z_62-010_(1973).
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO25French MIB = 46
+
+	// ISO27LatinGreek1 is the MIB identifier with IANA name Latin-greek-1.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO27LatinGreek1 MIB = 47
+
+	// ISO5427Cyrillic is the MIB identifier with IANA name ISO_5427.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO5427Cyrillic MIB = 48
+
+	// ISO42JISC62261978 is the MIB identifier with IANA name JIS_C6226-1978.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO42JISC62261978 MIB = 49
+
+	// ISO47BSViewdata is the MIB identifier with IANA name BS_viewdata.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO47BSViewdata MIB = 50
+
+	// ISO49INIS is the MIB identifier with IANA name INIS.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO49INIS MIB = 51
+
+	// ISO50INIS8 is the MIB identifier with IANA name INIS-8.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO50INIS8 MIB = 52
+
+	// ISO51INISCyrillic is the MIB identifier with IANA name INIS-cyrillic.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO51INISCyrillic MIB = 53
+
+	// ISO54271981 is the MIB identifier with IANA name ISO_5427:1981.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO54271981 MIB = 54
+
+	// ISO5428Greek is the MIB identifier with IANA name ISO_5428:1980.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO5428Greek MIB = 55
+
+	// ISO57GB1988 is the MIB identifier with IANA name GB_1988-80.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO57GB1988 MIB = 56
+
+	// ISO58GB231280 is the MIB identifier with IANA name GB_2312-80.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO58GB231280 MIB = 57
+
+	// ISO61Norwegian2 is the MIB identifier with IANA name NS_4551-2.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO61Norwegian2 MIB = 58
+
+	// ISO70VideotexSupp1 is the MIB identifier with IANA name videotex-suppl.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO70VideotexSupp1 MIB = 59
+
+	// ISO84Portuguese2 is the MIB identifier with IANA name PT2.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO84Portuguese2 MIB = 60
+
+	// ISO85Spanish2 is the MIB identifier with IANA name ES2.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO85Spanish2 MIB = 61
+
+	// ISO86Hungarian is the MIB identifier with IANA name MSZ_7795.3.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO86Hungarian MIB = 62
+
+	// ISO87JISX0208 is the MIB identifier with IANA name JIS_C6226-1983.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO87JISX0208 MIB = 63
+
+	// ISO88Greek7 is the MIB identifier with IANA name greek7.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO88Greek7 MIB = 64
+
+	// ISO89ASMO449 is the MIB identifier with IANA name ASMO_449.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO89ASMO449 MIB = 65
+
+	// ISO90 is the MIB identifier with IANA name iso-ir-90.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO90 MIB = 66
+
+	// ISO91JISC62291984a is the MIB identifier with IANA name JIS_C6229-1984-a.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO91JISC62291984a MIB = 67
+
+	// ISO92JISC62991984b is the MIB identifier with IANA name JIS_C6229-1984-b.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO92JISC62991984b MIB = 68
+
+	// ISO93JIS62291984badd is the MIB identifier with IANA name JIS_C6229-1984-b-add.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO93JIS62291984badd MIB = 69
+
+	// ISO94JIS62291984hand is the MIB identifier with IANA name JIS_C6229-1984-hand.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO94JIS62291984hand MIB = 70
+
+	// ISO95JIS62291984handadd is the MIB identifier with IANA name JIS_C6229-1984-hand-add.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO95JIS62291984handadd MIB = 71
+
+	// ISO96JISC62291984kana is the MIB identifier with IANA name JIS_C6229-1984-kana.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO96JISC62291984kana MIB = 72
+
+	// ISO2033 is the MIB identifier with IANA name ISO_2033-1983.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO2033 MIB = 73
+
+	// ISO99NAPLPS is the MIB identifier with IANA name ANSI_X3.110-1983.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO99NAPLPS MIB = 74
+
+	// ISO102T617bit is the MIB identifier with IANA name T.61-7bit.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO102T617bit MIB = 75
+
+	// ISO103T618bit is the MIB identifier with IANA name T.61-8bit.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO103T618bit MIB = 76
+
+	// ISO111ECMACyrillic is the MIB identifier with IANA name ECMA-cyrillic.
+	//
+	// ISO registry
+	ISO111ECMACyrillic MIB = 77
+
+	// ISO121Canadian1 is the MIB identifier with IANA name CSA_Z243.4-1985-1.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO121Canadian1 MIB = 78
+
+	// ISO122Canadian2 is the MIB identifier with IANA name CSA_Z243.4-1985-2.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO122Canadian2 MIB = 79
+
+	// ISO123CSAZ24341985gr is the MIB identifier with IANA name CSA_Z243.4-1985-gr.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO123CSAZ24341985gr MIB = 80
+
+	// ISO88596E is the MIB identifier with IANA name ISO_8859-6-E (MIME: ISO-8859-6-E).
+	//
+	// rfc1556
+	// Reference: RFC1556
+	ISO88596E MIB = 81
+
+	// ISO88596I is the MIB identifier with IANA name ISO_8859-6-I (MIME: ISO-8859-6-I).
+	//
+	// rfc1556
+	// Reference: RFC1556
+	ISO88596I MIB = 82
+
+	// ISO128T101G2 is the MIB identifier with IANA name T.101-G2.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO128T101G2 MIB = 83
+
+	// ISO88598E is the MIB identifier with IANA name ISO_8859-8-E (MIME: ISO-8859-8-E).
+	//
+	// rfc1556
+	// Reference: RFC1556
+	ISO88598E MIB = 84
+
+	// ISO88598I is the MIB identifier with IANA name ISO_8859-8-I (MIME: ISO-8859-8-I).
+	//
+	// rfc1556
+	// Reference: RFC1556
+	ISO88598I MIB = 85
+
+	// ISO139CSN369103 is the MIB identifier with IANA name CSN_369103.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO139CSN369103 MIB = 86
+
+	// ISO141JUSIB1002 is the MIB identifier with IANA name JUS_I.B1.002.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO141JUSIB1002 MIB = 87
+
+	// ISO143IECP271 is the MIB identifier with IANA name IEC_P27-1.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO143IECP271 MIB = 88
+
+	// ISO146Serbian is the MIB identifier with IANA name JUS_I.B1.003-serb.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO146Serbian MIB = 89
+
+	// ISO147Macedonian is the MIB identifier with IANA name JUS_I.B1.003-mac.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO147Macedonian MIB = 90
+
+	// ISO150GreekCCITT is the MIB identifier with IANA name greek-ccitt.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO150GreekCCITT MIB = 91
+
+	// ISO151Cuba is the MIB identifier with IANA name NC_NC00-10:81.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO151Cuba MIB = 92
+
+	// ISO6937Add is the MIB identifier with IANA name ISO_6937-2-25.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO6937Add MIB = 93
+
+	// ISO153GOST1976874 is the MIB identifier with IANA name GOST_19768-74.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO153GOST1976874 MIB = 94
+
+	// ISO8859Supp is the MIB identifier with IANA name ISO_8859-supp.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO8859Supp MIB = 95
+
+	// ISO10367Box is the MIB identifier with IANA name ISO_10367-box.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO10367Box MIB = 96
+
+	// ISO158Lap is the MIB identifier with IANA name latin-lap.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO158Lap MIB = 97
+
+	// ISO159JISX02121990 is the MIB identifier with IANA name JIS_X0212-1990.
+	//
+	// ISO-IR: International Register of Escape Sequences
+	// Note: The current registration authority is IPSJ/ITSCJ, Japan.
+	// Reference: RFC1345
+	ISO159JISX02121990 MIB = 98
+
+	// ISO646Danish is the MIB identifier with IANA name DS_2089.
+	//
+	// Danish Standard, DS 2089, February 1974
+	// Reference: RFC1345
+	ISO646Danish MIB = 99
+
+	// USDK is the MIB identifier with IANA name us-dk.
+	//
+	// Reference: RFC1345
+	USDK MIB = 100
+
+	// DKUS is the MIB identifier with IANA name dk-us.
+	//
+	// Reference: RFC1345
+	DKUS MIB = 101
+
+	// KSC5636 is the MIB identifier with IANA name KSC5636.
+	//
+	// Reference: RFC1345
+	KSC5636 MIB = 102
+
+	// Unicode11UTF7 is the MIB identifier with IANA name UNICODE-1-1-UTF-7.
+	//
+	// rfc1642
+	// Reference: RFC1642
+	Unicode11UTF7 MIB = 103
+
+	// ISO2022CN is the MIB identifier with IANA name ISO-2022-CN.
+	//
+	// rfc1922
+	// Reference: RFC1922
+	ISO2022CN MIB = 104
+
+	// ISO2022CNEXT is the MIB identifier with IANA name ISO-2022-CN-EXT.
+	//
+	// rfc1922
+	// Reference: RFC1922
+	ISO2022CNEXT MIB = 105
+
+	// UTF8 is the MIB identifier with IANA name UTF-8.
+	//
+	// rfc3629
+	// Reference: RFC3629
+	UTF8 MIB = 106
+
+	// ISO885913 is the MIB identifier with IANA name ISO-8859-13.
+	//
+	// ISO See https://www.iana.org/assignments/charset-reg/ISO-8859-13
+	ISO885913 MIB = 109
+
+	// ISO885914 is the MIB identifier with IANA name ISO-8859-14.
+	//
+	// ISO See https://www.iana.org/assignments/charset-reg/ISO-8859-14
+	ISO885914 MIB = 110
+
+	// ISO885915 is the MIB identifier with IANA name ISO-8859-15.
+	//
+	// ISO
+	// Please see: https://www.iana.org/assignments/charset-reg/ISO-8859-15
+	ISO885915 MIB = 111
+
+	// ISO885916 is the MIB identifier with IANA name ISO-8859-16.
+	//
+	// ISO
+	ISO885916 MIB = 112
+
+	// GBK is the MIB identifier with IANA name GBK.
+	//
+	// Chinese IT Standardization Technical Committee
+	// Please see: https://www.iana.org/assignments/charset-reg/GBK
+	GBK MIB = 113
+
+	// GB18030 is the MIB identifier with IANA name GB18030.
+	//
+	// Chinese IT Standardization Technical Committee
+	// Please see: https://www.iana.org/assignments/charset-reg/GB18030
+	GB18030 MIB = 114
+
+	// OSDEBCDICDF0415 is the MIB identifier with IANA name OSD_EBCDIC_DF04_15.
+	//
+	// Fujitsu-Siemens standard mainframe EBCDIC encoding
+	// Please see: https://www.iana.org/assignments/charset-reg/OSD-EBCDIC-DF04-15
+	OSDEBCDICDF0415 MIB = 115
+
+	// OSDEBCDICDF03IRV is the MIB identifier with IANA name OSD_EBCDIC_DF03_IRV.
+	//
+	// Fujitsu-Siemens standard mainframe EBCDIC encoding
+	// Please see: https://www.iana.org/assignments/charset-reg/OSD-EBCDIC-DF03-IRV
+	OSDEBCDICDF03IRV MIB = 116
+
+	// OSDEBCDICDF041 is the MIB identifier with IANA name OSD_EBCDIC_DF04_1.
+	//
+	// Fujitsu-Siemens standard mainframe EBCDIC encoding
+	// Please see: https://www.iana.org/assignments/charset-reg/OSD-EBCDIC-DF04-1
+	OSDEBCDICDF041 MIB = 117
+
+	// ISO115481 is the MIB identifier with IANA name ISO-11548-1.
+	//
+	// See https://www.iana.org/assignments/charset-reg/ISO-11548-1
+	ISO115481 MIB = 118
+
+	// KZ1048 is the MIB identifier with IANA name KZ-1048.
+	//
+	// See https://www.iana.org/assignments/charset-reg/KZ-1048
+	KZ1048 MIB = 119
+
+	// Unicode is the MIB identifier with IANA name ISO-10646-UCS-2.
+	//
+	// the 2-octet Basic Multilingual Plane, aka Unicode
+	// this needs to specify network byte order: the standard
+	// does not specify (it is a 16-bit integer space)
+	Unicode MIB = 1000
+
+	// UCS4 is the MIB identifier with IANA name ISO-10646-UCS-4.
+	//
+	// the full code space. (same comment about byte order,
+	// these are 31-bit numbers.)
+	UCS4 MIB = 1001
+
+	// UnicodeASCII is the MIB identifier with IANA name ISO-10646-UCS-Basic.
+	//
+	// ASCII subset of Unicode.  Basic Latin = collection 1
+	// See ISO 10646, Appendix A
+	UnicodeASCII MIB = 1002
+
+	// UnicodeLatin1 is the MIB identifier with IANA name ISO-10646-Unicode-Latin1.
+	//
+	// ISO Latin-1 subset of Unicode. Basic Latin and Latin-1
+	// Supplement  = collections 1 and 2.  See ISO 10646,
+	// Appendix A.  See rfc1815 .
+	UnicodeLatin1 MIB = 1003
+
+	// UnicodeJapanese is the MIB identifier with IANA name ISO-10646-J-1.
+	//
+	// ISO 10646 Japanese, see rfc1815 .
+	UnicodeJapanese MIB = 1004
+
+	// UnicodeIBM1261 is the MIB identifier with IANA name ISO-Unicode-IBM-1261.
+	//
+	// IBM Latin-2, -3, -5, Extended Presentation Set, GCSGID: 1261
+	UnicodeIBM1261 MIB = 1005
+
+	// UnicodeIBM1268 is the MIB identifier with IANA name ISO-Unicode-IBM-1268.
+	//
+	// IBM Latin-4 Extended Presentation Set, GCSGID: 1268
+	UnicodeIBM1268 MIB = 1006
+
+	// UnicodeIBM1276 is the MIB identifier with IANA name ISO-Unicode-IBM-1276.
+	//
+	// IBM Cyrillic Greek Extended Presentation Set, GCSGID: 1276
+	UnicodeIBM1276 MIB = 1007
+
+	// UnicodeIBM1264 is the MIB identifier with IANA name ISO-Unicode-IBM-1264.
+	//
+	// IBM Arabic Presentation Set, GCSGID: 1264
+	UnicodeIBM1264 MIB = 1008
+
+	// UnicodeIBM1265 is the MIB identifier with IANA name ISO-Unicode-IBM-1265.
+	//
+	// IBM Hebrew Presentation Set, GCSGID: 1265
+	UnicodeIBM1265 MIB = 1009
+
+	// Unicode11 is the MIB identifier with IANA name UNICODE-1-1.
+	//
+	// rfc1641
+	// Reference: RFC1641
+	Unicode11 MIB = 1010
+
+	// SCSU is the MIB identifier with IANA name SCSU.
+	//
+	// SCSU See https://www.iana.org/assignments/charset-reg/SCSU
+	SCSU MIB = 1011
+
+	// UTF7 is the MIB identifier with IANA name UTF-7.
+	//
+	// rfc2152
+	// Reference: RFC2152
+	UTF7 MIB = 1012
+
+	// UTF16BE is the MIB identifier with IANA name UTF-16BE.
+	//
+	// rfc2781
+	// Reference: RFC2781
+	UTF16BE MIB = 1013
+
+	// UTF16LE is the MIB identifier with IANA name UTF-16LE.
+	//
+	// rfc2781
+	// Reference: RFC2781
+	UTF16LE MIB = 1014
+
+	// UTF16 is the MIB identifier with IANA name UTF-16.
+	//
+	// rfc2781
+	// Reference: RFC2781
+	UTF16 MIB = 1015
+
+	// CESU8 is the MIB identifier with IANA name CESU-8.
+	//
+	// https://www.unicode.org/reports/tr26
+	CESU8 MIB = 1016
+
+	// UTF32 is the MIB identifier with IANA name UTF-32.
+	//
+	// https://www.unicode.org/reports/tr19/
+	UTF32 MIB = 1017
+
+	// UTF32BE is the MIB identifier with IANA name UTF-32BE.
+	//
+	// https://www.unicode.org/reports/tr19/
+	UTF32BE MIB = 1018
+
+	// UTF32LE is the MIB identifier with IANA name UTF-32LE.
+	//
+	// https://www.unicode.org/reports/tr19/
+	UTF32LE MIB = 1019
+
+	// BOCU1 is the MIB identifier with IANA name BOCU-1.
+	//
+	// https://www.unicode.org/notes/tn6/
+	BOCU1 MIB = 1020
+
+	// Windows30Latin1 is the MIB identifier with IANA name ISO-8859-1-Windows-3.0-Latin-1.
+	//
+	// Extended ISO 8859-1 Latin-1 for Windows 3.0.
+	// PCL Symbol Set id: 9U
+	Windows30Latin1 MIB = 2000
+
+	// Windows31Latin1 is the MIB identifier with IANA name ISO-8859-1-Windows-3.1-Latin-1.
+	//
+	// Extended ISO 8859-1 Latin-1 for Windows 3.1.
+	// PCL Symbol Set id: 19U
+	Windows31Latin1 MIB = 2001
+
+	// Windows31Latin2 is the MIB identifier with IANA name ISO-8859-2-Windows-Latin-2.
+	//
+	// Extended ISO 8859-2.  Latin-2 for Windows 3.1.
+	// PCL Symbol Set id: 9E
+	Windows31Latin2 MIB = 2002
+
+	// Windows31Latin5 is the MIB identifier with IANA name ISO-8859-9-Windows-Latin-5.
+	//
+	// Extended ISO 8859-9.  Latin-5 for Windows 3.1
+	// PCL Symbol Set id: 5T
+	Windows31Latin5 MIB = 2003
+
+	// HPRoman8 is the MIB identifier with IANA name hp-roman8.
+	//
+	// LaserJet IIP Printer User's Manual,
+	// HP part no 33471-90901, Hewlett-Packard, June 1989.
+	// Reference: RFC1345
+	HPRoman8 MIB = 2004
+
+	// AdobeStandardEncoding is the MIB identifier with IANA name Adobe-Standard-Encoding.
+	//
+	// PostScript Language Reference Manual
+	// PCL Symbol Set id: 10J
+	AdobeStandardEncoding MIB = 2005
+
+	// VenturaUS is the MIB identifier with IANA name Ventura-US.
+	//
+	// Ventura US.  ASCII plus characters typically used in
+	// publishing, like pilcrow, copyright, registered, trade mark,
+	// section, dagger, and double dagger in the range A0 (hex)
+	// to FF (hex).
+	// PCL Symbol Set id: 14J
+	VenturaUS MIB = 2006
+
+	// VenturaInternational is the MIB identifier with IANA name Ventura-International.
+	//
+	// Ventura International.  ASCII plus coded characters similar
+	// to Roman8.
+	// PCL Symbol Set id: 13J
+	VenturaInternational MIB = 2007
+
+	// DECMCS is the MIB identifier with IANA name DEC-MCS.
+	//
+	// VAX/VMS User's Manual,
+	// Order Number: AI-Y517A-TE, April 1986.
+	// Reference: RFC1345
+	DECMCS MIB = 2008
+
+	// PC850Multilingual is the MIB identifier with IANA name IBM850.
+	//
+	// IBM NLS RM Vol2 SE09-8002-01, March 1990
+	// Reference: RFC1345
+	PC850Multilingual MIB = 2009
+
+	// PC8DanishNorwegian is the MIB identifier with IANA name PC8-Danish-Norwegian.
+	//
+	// PC Danish Norwegian
+	// 8-bit PC set for Danish Norwegian
+	// PCL Symbol Set id: 11U
+	PC8DanishNorwegian MIB = 2012
+
+	// PC862LatinHebrew is the MIB identifier with IANA name IBM862.
+	//
+	// IBM NLS RM Vol2 SE09-8002-01, March 1990
+	// Reference: RFC1345
+	PC862LatinHebrew MIB = 2013
+
+	// PC8Turkish is the MIB identifier with IANA name PC8-Turkish.
+	//
+	// PC Latin Turkish.  PCL Symbol Set id: 9T
+	PC8Turkish MIB = 2014
+
+	// IBMSymbols is the MIB identifier with IANA name IBM-Symbols.
+	//
+	// Presentation Set, CPGID: 259
+	IBMSymbols MIB = 2015
+
+	// IBMThai is the MIB identifier with IANA name IBM-Thai.
+	//
+	// Presentation Set, CPGID: 838
+	IBMThai MIB = 2016
+
+	// HPLegal is the MIB identifier with IANA name HP-Legal.
+	//
+	// PCL 5 Comparison Guide, Hewlett-Packard,
+	// HP part number 5961-0510, October 1992
+	// PCL Symbol Set id: 1U
+	HPLegal MIB = 2017
+
+	// HPPiFont is the MIB identifier with IANA name HP-Pi-font.
+	//
+	// PCL 5 Comparison Guide, Hewlett-Packard,
+	// HP part number 5961-0510, October 1992
+	// PCL Symbol Set id: 15U
+	HPPiFont MIB = 2018
+
+	// HPMath8 is the MIB identifier with IANA name HP-Math8.
+	//
+	// PCL 5 Comparison Guide, Hewlett-Packard,
+	// HP part number 5961-0510, October 1992
+	// PCL Symbol Set id: 8M
+	HPMath8 MIB = 2019
+
+	// HPPSMath is the MIB identifier with IANA name Adobe-Symbol-Encoding.
+	//
+	// PostScript Language Reference Manual
+	// PCL Symbol Set id: 5M
+	HPPSMath MIB = 2020
+
+	// HPDesktop is the MIB identifier with IANA name HP-DeskTop.
+	//
+	// PCL 5 Comparison Guide, Hewlett-Packard,
+	// HP part number 5961-0510, October 1992
+	// PCL Symbol Set id: 7J
+	HPDesktop MIB = 2021
+
+	// VenturaMath is the MIB identifier with IANA name Ventura-Math.
+	//
+	// PCL 5 Comparison Guide, Hewlett-Packard,
+	// HP part number 5961-0510, October 1992
+	// PCL Symbol Set id: 6M
+	VenturaMath MIB = 2022
+
+	// MicrosoftPublishing is the MIB identifier with IANA name Microsoft-Publishing.
+	//
+	// PCL 5 Comparison Guide, Hewlett-Packard,
+	// HP part number 5961-0510, October 1992
+	// PCL Symbol Set id: 6J
+	MicrosoftPublishing MIB = 2023
+
+	// Windows31J is the MIB identifier with IANA name Windows-31J.
+	//
+	// Windows Japanese.  A further extension of Shift_JIS
+	// to include NEC special characters (Row 13), NEC
+	// selection of IBM extensions (Rows 89 to 92), and IBM
+	// extensions (Rows 115 to 119).  The CCS's are
+	// JIS X0201:1997, JIS X0208:1997, and these extensions.
+	// This charset can be used for the top-level media type "text",
+	// but it is of limited or specialized use (see rfc2278 ).
+	// PCL Symbol Set id: 19K
+	Windows31J MIB = 2024
+
+	// GB2312 is the MIB identifier with IANA name GB2312 (MIME: GB2312).
+	//
+	// Chinese for People's Republic of China (PRC) mixed one byte,
+	// two byte set:
+	// 20-7E = one byte ASCII
+	// A1-FE = two byte PRC Kanji
+	// See GB 2312-80
+	// PCL Symbol Set Id: 18C
+	GB2312 MIB = 2025
+
+	// Big5 is the MIB identifier with IANA name Big5 (MIME: Big5).
+	//
+	// Chinese for Taiwan Multi-byte set.
+	// PCL Symbol Set Id: 18T
+	Big5 MIB = 2026
+
+	// Macintosh is the MIB identifier with IANA name macintosh.
+	//
+	// The Unicode Standard ver1.0, ISBN 0-201-56788-1, Oct 1991
+	// Reference: RFC1345
+	Macintosh MIB = 2027
+
+	// IBM037 is the MIB identifier with IANA name IBM037.
+	//
+	// IBM NLS RM Vol2 SE09-8002-01, March 1990
+	// Reference: RFC1345
+	IBM037 MIB = 2028
+
+	// IBM038 is the MIB identifier with IANA name IBM038.
+	//
+	// IBM 3174 Character Set Ref, GA27-3831-02, March 1990
+	// Reference: RFC1345
+	IBM038 MIB = 2029
+
+	// IBM273 is the MIB identifier with IANA name IBM273.
+	//
+	// IBM NLS RM Vol2 SE09-8002-01, March 1990
+	// Reference: RFC1345
+	IBM273 MIB = 2030
+
+	// IBM274 is the MIB identifier with IANA name IBM274.
+	//
+	// IBM 3174 Character Set Ref, GA27-3831-02, March 1990
+	// Reference: RFC1345
+	IBM274 MIB = 2031
+
+	// IBM275 is the MIB identifier with IANA name IBM275.
+	//
+	// IBM NLS RM Vol2 SE09-8002-01, March 1990
+	// Reference: RFC1345
+	IBM275 MIB = 2032
+
+	// IBM277 is the MIB identifier with IANA name IBM277.
+	//
+	// IBM NLS RM Vol2 SE09-8002-01, March 1990
+	// Reference: RFC1345
+	IBM277 MIB = 2033
+
+	// IBM278 is the MIB identifier with IANA name IBM278.
+	//
+	// IBM NLS RM Vol2 SE09-8002-01, March 1990
+	// Reference: RFC1345
+	IBM278 MIB = 2034
+
+	// IBM280 is the MIB identifier with IANA name IBM280.
+	//
+	// IBM NLS RM Vol2 SE09-8002-01, March 1990
+	// Reference: RFC1345
+	IBM280 MIB = 2035
+
+	// IBM281 is the MIB identifier with IANA name IBM281.
+	//
+	// IBM 3174 Character Set Ref, GA27-3831-02, March 1990
+	// Reference: RFC1345
+	IBM281 MIB = 2036
+
+	// IBM284 is the MIB identifier with IANA name IBM284.
+	//
+	// IBM NLS RM Vol2 SE09-8002-01, March 1990
+	// Reference: RFC1345
+	IBM284 MIB = 2037
+
+	// IBM285 is the MIB identifier with IANA name IBM285.
+	//
+	// IBM NLS RM Vol2 SE09-8002-01, March 1990
+	// Reference: RFC1345
+	IBM285 MIB = 2038
+
+	// IBM290 is the MIB identifier with IANA name IBM290.
+	//
+	// IBM 3174 Character Set Ref, GA27-3831-02, March 1990
+	// Reference: RFC1345
+	IBM290 MIB = 2039
+
+	// IBM297 is the MIB identifier with IANA name IBM297.
+	//
+	// IBM NLS RM Vol2 SE09-8002-01, March 1990
+	// Reference: RFC1345
+	IBM297 MIB = 2040
+
+	// IBM420 is the MIB identifier with IANA name IBM420.
+	//
+	// IBM NLS RM Vol2 SE09-8002-01, March 1990,
+	// IBM NLS RM p 11-11
+	// Reference: RFC1345
+	IBM420 MIB = 2041
+
+	// IBM423 is the MIB identifier with IANA name IBM423.
+	//
+	// IBM NLS RM Vol2 SE09-8002-01, March 1990
+	// Reference: RFC1345
+	IBM423 MIB = 2042
+
+	// IBM424 is the MIB identifier with IANA name IBM424.
+	//
+	// IBM NLS RM Vol2 SE09-8002-01, March 1990
+	// Reference: RFC1345
+	IBM424 MIB = 2043
+
+	// PC8CodePage437 is the MIB identifier with IANA name IBM437.
+	//
+	// IBM NLS RM Vol2 SE09-8002-01, March 1990
+	// Reference: RFC1345
+	PC8CodePage437 MIB = 2011
+
+	// IBM500 is the MIB identifier with IANA name IBM500.
+	//
+	// IBM NLS RM Vol2 SE09-8002-01, March 1990
+	// Reference: RFC1345
+	IBM500 MIB = 2044
+
+	// IBM851 is the MIB identifier with IANA name IBM851.
+	//
+	// IBM NLS RM Vol2 SE09-8002-01, March 1990
+	// Reference: RFC1345
+	IBM851 MIB = 2045
+
+	// PCp852 is the MIB identifier with IANA name IBM852.
+	//
+	// IBM NLS RM Vol2 SE09-8002-01, March 1990
+	// Reference: RFC1345
+	PCp852 MIB = 2010
+
+	// IBM855 is the MIB identifier with IANA name IBM855.
+	//
+	// IBM NLS RM Vol2 SE09-8002-01, March 1990
+	// Reference: RFC1345
+	IBM855 MIB = 2046
+
+	// IBM857 is the MIB identifier with IANA name IBM857.
+	//
+	// IBM NLS RM Vol2 SE09-8002-01, March 1990
+	// Reference: RFC1345
+	IBM857 MIB = 2047
+
+	// IBM860 is the MIB identifier with IANA name IBM860.
+	//
+	// IBM NLS RM Vol2 SE09-8002-01, March 1990
+	// Reference: RFC1345
+	IBM860 MIB = 2048
+
+	// IBM861 is the MIB identifier with IANA name IBM861.
+	//
+	// IBM NLS RM Vol2 SE09-8002-01, March 1990
+	// Reference: RFC1345
+	IBM861 MIB = 2049
+
+	// IBM863 is the MIB identifier with IANA name IBM863.
+	//
+	// IBM Keyboard layouts and code pages, PN 07G4586 June 1991
+	// Reference: RFC1345
+	IBM863 MIB = 2050
+
+	// IBM864 is the MIB identifier with IANA name IBM864.
+	//
+	// IBM Keyboard layouts and code pages, PN 07G4586 June 1991
+	// Reference: RFC1345
+	IBM864 MIB = 2051
+
+	// IBM865 is the MIB identifier with IANA name IBM865.
+	//
+	// IBM DOS 3.3 Ref (Abridged), 94X9575 (Feb 1987)
+	// Reference: RFC1345
+	IBM865 MIB = 2052
+
+	// IBM868 is the MIB identifier with IANA name IBM868.
+	//
+	// IBM NLS RM Vol2 SE09-8002-01, March 1990
+	// Reference: RFC1345
+	IBM868 MIB = 2053
+
+	// IBM869 is the MIB identifier with IANA name IBM869.
+	//
+	// IBM Keyboard layouts and code pages, PN 07G4586 June 1991
+	// Reference: RFC1345
+	IBM869 MIB = 2054
+
+	// IBM870 is the MIB identifier with IANA name IBM870.
+	//
+	// IBM NLS RM Vol2 SE09-8002-01, March 1990
+	// Reference: RFC1345
+	IBM870 MIB = 2055
+
+	// IBM871 is the MIB identifier with IANA name IBM871.
+	//
+	// IBM NLS RM Vol2 SE09-8002-01, March 1990
+	// Reference: RFC1345
+	IBM871 MIB = 2056
+
+	// IBM880 is the MIB identifier with IANA name IBM880.
+	//
+	// IBM NLS RM Vol2 SE09-8002-01, March 1990
+	// Reference: RFC1345
+	IBM880 MIB = 2057
+
+	// IBM891 is the MIB identifier with IANA name IBM891.
+	//
+	// IBM NLS RM Vol2 SE09-8002-01, March 1990
+	// Reference: RFC1345
+	IBM891 MIB = 2058
+
+	// IBM903 is the MIB identifier with IANA name IBM903.
+	//
+	// IBM NLS RM Vol2 SE09-8002-01, March 1990
+	// Reference: RFC1345
+	IBM903 MIB = 2059
+
+	// IBBM904 is the MIB identifier with IANA name IBM904.
+	//
+	// IBM NLS RM Vol2 SE09-8002-01, March 1990
+	// Reference: RFC1345
+	IBBM904 MIB = 2060
+
+	// IBM905 is the MIB identifier with IANA name IBM905.
+	//
+	// IBM 3174 Character Set Ref, GA27-3831-02, March 1990
+	// Reference: RFC1345
+	IBM905 MIB = 2061
+
+	// IBM918 is the MIB identifier with IANA name IBM918.
+	//
+	// IBM NLS RM Vol2 SE09-8002-01, March 1990
+	// Reference: RFC1345
+	IBM918 MIB = 2062
+
+	// IBM1026 is the MIB identifier with IANA name IBM1026.
+	//
+	// IBM NLS RM Vol2 SE09-8002-01, March 1990
+	// Reference: RFC1345
+	IBM1026 MIB = 2063
+
+	// IBMEBCDICATDE is the MIB identifier with IANA name EBCDIC-AT-DE.
+	//
+	// IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987
+	// Reference: RFC1345
+	IBMEBCDICATDE MIB = 2064
+
+	// EBCDICATDEA is the MIB identifier with IANA name EBCDIC-AT-DE-A.
+	//
+	// IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987
+	// Reference: RFC1345
+	EBCDICATDEA MIB = 2065
+
+	// EBCDICCAFR is the MIB identifier with IANA name EBCDIC-CA-FR.
+	//
+	// IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987
+	// Reference: RFC1345
+	EBCDICCAFR MIB = 2066
+
+	// EBCDICDKNO is the MIB identifier with IANA name EBCDIC-DK-NO.
+	//
+	// IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987
+	// Reference: RFC1345
+	EBCDICDKNO MIB = 2067
+
+	// EBCDICDKNOA is the MIB identifier with IANA name EBCDIC-DK-NO-A.
+	//
+	// IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987
+	// Reference: RFC1345
+	EBCDICDKNOA MIB = 2068
+
+	// EBCDICFISE is the MIB identifier with IANA name EBCDIC-FI-SE.
+	//
+	// IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987
+	// Reference: RFC1345
+	EBCDICFISE MIB = 2069
+
+	// EBCDICFISEA is the MIB identifier with IANA name EBCDIC-FI-SE-A.
+	//
+	// IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987
+	// Reference: RFC1345
+	EBCDICFISEA MIB = 2070
+
+	// EBCDICFR is the MIB identifier with IANA name EBCDIC-FR.
+	//
+	// IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987
+	// Reference: RFC1345
+	EBCDICFR MIB = 2071
+
+	// EBCDICIT is the MIB identifier with IANA name EBCDIC-IT.
+	//
+	// IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987
+	// Reference: RFC1345
+	EBCDICIT MIB = 2072
+
+	// EBCDICPT is the MIB identifier with IANA name EBCDIC-PT.
+	//
+	// IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987
+	// Reference: RFC1345
+	EBCDICPT MIB = 2073
+
+	// EBCDICES is the MIB identifier with IANA name EBCDIC-ES.
+	//
+	// IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987
+	// Reference: RFC1345
+	EBCDICES MIB = 2074
+
+	// EBCDICESA is the MIB identifier with IANA name EBCDIC-ES-A.
+	//
+	// IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987
+	// Reference: RFC1345
+	EBCDICESA MIB = 2075
+
+	// EBCDICESS is the MIB identifier with IANA name EBCDIC-ES-S.
+	//
+	// IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987
+	// Reference: RFC1345
+	EBCDICESS MIB = 2076
+
+	// EBCDICUK is the MIB identifier with IANA name EBCDIC-UK.
+	//
+	// IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987
+	// Reference: RFC1345
+	EBCDICUK MIB = 2077
+
+	// EBCDICUS is the MIB identifier with IANA name EBCDIC-US.
+	//
+	// IBM 3270 Char Set Ref Ch 10, GA27-2837-9, April 1987
+	// Reference: RFC1345
+	EBCDICUS MIB = 2078
+
+	// Unknown8BiT is the MIB identifier with IANA name UNKNOWN-8BIT.
+	//
+	// Reference: RFC1428
+	Unknown8BiT MIB = 2079
+
+	// Mnemonic is the MIB identifier with IANA name MNEMONIC.
+	//
+	// rfc1345 , also known as "mnemonic+ascii+38"
+	// Reference: RFC1345
+	Mnemonic MIB = 2080
+
+	// Mnem is the MIB identifier with IANA name MNEM.
+	//
+	// rfc1345 , also known as "mnemonic+ascii+8200"
+	// Reference: RFC1345
+	Mnem MIB = 2081
+
+	// VISCII is the MIB identifier with IANA name VISCII.
+	//
+	// rfc1456
+	// Reference: RFC1456
+	VISCII MIB = 2082
+
+	// VIQR is the MIB identifier with IANA name VIQR.
+	//
+	// rfc1456
+	// Reference: RFC1456
+	VIQR MIB = 2083
+
+	// KOI8R is the MIB identifier with IANA name KOI8-R (MIME: KOI8-R).
+	//
+	// rfc1489 , based on GOST-19768-74, ISO-6937/8,
+	// INIS-Cyrillic, ISO-5427.
+	// Reference: RFC1489
+	KOI8R MIB = 2084
+
+	// HZGB2312 is the MIB identifier with IANA name HZ-GB-2312.
+	//
+	// rfc1842, rfc1843
+	HZGB2312 MIB = 2085
+
+	// IBM866 is the MIB identifier with IANA name IBM866.
+	//
+	// IBM NLDG Volume 2 (SE09-8002-03) August 1994
+	IBM866 MIB = 2086
+
+	// PC775Baltic is the MIB identifier with IANA name IBM775.
+	//
+	// HP PCL 5 Comparison Guide (P/N 5021-0329) pp B-13, 1996
+	PC775Baltic MIB = 2087
+
+	// KOI8U is the MIB identifier with IANA name KOI8-U.
+	//
+	// rfc2319
+	// Reference: RFC2319
+	KOI8U MIB = 2088
+
+	// IBM00858 is the MIB identifier with IANA name IBM00858.
+	//
+	// IBM See https://www.iana.org/assignments/charset-reg/IBM00858
+	IBM00858 MIB = 2089
+
+	// IBM00924 is the MIB identifier with IANA name IBM00924.
+	//
+	// IBM See https://www.iana.org/assignments/charset-reg/IBM00924
+	IBM00924 MIB = 2090
+
+	// IBM01140 is the MIB identifier with IANA name IBM01140.
+	//
+	// IBM See https://www.iana.org/assignments/charset-reg/IBM01140
+	IBM01140 MIB = 2091
+
+	// IBM01141 is the MIB identifier with IANA name IBM01141.
+	//
+	// IBM See https://www.iana.org/assignments/charset-reg/IBM01141
+	IBM01141 MIB = 2092
+
+	// IBM01142 is the MIB identifier with IANA name IBM01142.
+	//
+	// IBM See https://www.iana.org/assignments/charset-reg/IBM01142
+	IBM01142 MIB = 2093
+
+	// IBM01143 is the MIB identifier with IANA name IBM01143.
+	//
+	// IBM See https://www.iana.org/assignments/charset-reg/IBM01143
+	IBM01143 MIB = 2094
+
+	// IBM01144 is the MIB identifier with IANA name IBM01144.
+	//
+	// IBM See https://www.iana.org/assignments/charset-reg/IBM01144
+	IBM01144 MIB = 2095
+
+	// IBM01145 is the MIB identifier with IANA name IBM01145.
+	//
+	// IBM See https://www.iana.org/assignments/charset-reg/IBM01145
+	IBM01145 MIB = 2096
+
+	// IBM01146 is the MIB identifier with IANA name IBM01146.
+	//
+	// IBM See https://www.iana.org/assignments/charset-reg/IBM01146
+	IBM01146 MIB = 2097
+
+	// IBM01147 is the MIB identifier with IANA name IBM01147.
+	//
+	// IBM See https://www.iana.org/assignments/charset-reg/IBM01147
+	IBM01147 MIB = 2098
+
+	// IBM01148 is the MIB identifier with IANA name IBM01148.
+	//
+	// IBM See https://www.iana.org/assignments/charset-reg/IBM01148
+	IBM01148 MIB = 2099
+
+	// IBM01149 is the MIB identifier with IANA name IBM01149.
+	//
+	// IBM See https://www.iana.org/assignments/charset-reg/IBM01149
+	IBM01149 MIB = 2100
+
+	// Big5HKSCS is the MIB identifier with IANA name Big5-HKSCS.
+	//
+	// See https://www.iana.org/assignments/charset-reg/Big5-HKSCS
+	Big5HKSCS MIB = 2101
+
+	// IBM1047 is the MIB identifier with IANA name IBM1047.
+	//
+	// IBM1047 (EBCDIC Latin 1/Open Systems) https://www-1.ibm.com/servers/eserver/iseries/software/globalization/pdf/cp01047z.pdf
+	IBM1047 MIB = 2102
+
+	// PTCP154 is the MIB identifier with IANA name PTCP154.
+	//
+	// See https://www.iana.org/assignments/charset-reg/PTCP154
+	PTCP154 MIB = 2103
+
+	// Amiga1251 is the MIB identifier with IANA name Amiga-1251.
+	//
+	// See https://www.amiga.ultranet.ru/Amiga-1251.html
+	Amiga1251 MIB = 2104
+
+	// KOI7switched is the MIB identifier with IANA name KOI7-switched.
+	//
+	// See https://www.iana.org/assignments/charset-reg/KOI7-switched
+	KOI7switched MIB = 2105
+
+	// BRF is the MIB identifier with IANA name BRF.
+	//
+	// See https://www.iana.org/assignments/charset-reg/BRF
+	BRF MIB = 2106
+
+	// TSCII is the MIB identifier with IANA name TSCII.
+	//
+	// See https://www.iana.org/assignments/charset-reg/TSCII
+	TSCII MIB = 2107
+
+	// CP51932 is the MIB identifier with IANA name CP51932.
+	//
+	// See https://www.iana.org/assignments/charset-reg/CP51932
+	CP51932 MIB = 2108
+
+	// Windows874 is the MIB identifier with IANA name windows-874.
+	//
+	// See https://www.iana.org/assignments/charset-reg/windows-874
+	Windows874 MIB = 2109
+
+	// Windows1250 is the MIB identifier with IANA name windows-1250.
+	//
+	// Microsoft https://www.iana.org/assignments/charset-reg/windows-1250
+	Windows1250 MIB = 2250
+
+	// Windows1251 is the MIB identifier with IANA name windows-1251.
+	//
+	// Microsoft https://www.iana.org/assignments/charset-reg/windows-1251
+	Windows1251 MIB = 2251
+
+	// Windows1252 is the MIB identifier with IANA name windows-1252.
+	//
+	// Microsoft https://www.iana.org/assignments/charset-reg/windows-1252
+	Windows1252 MIB = 2252
+
+	// Windows1253 is the MIB identifier with IANA name windows-1253.
+	//
+	// Microsoft https://www.iana.org/assignments/charset-reg/windows-1253
+	Windows1253 MIB = 2253
+
+	// Windows1254 is the MIB identifier with IANA name windows-1254.
+	//
+	// Microsoft https://www.iana.org/assignments/charset-reg/windows-1254
+	Windows1254 MIB = 2254
+
+	// Windows1255 is the MIB identifier with IANA name windows-1255.
+	//
+	// Microsoft https://www.iana.org/assignments/charset-reg/windows-1255
+	Windows1255 MIB = 2255
+
+	// Windows1256 is the MIB identifier with IANA name windows-1256.
+	//
+	// Microsoft https://www.iana.org/assignments/charset-reg/windows-1256
+	Windows1256 MIB = 2256
+
+	// Windows1257 is the MIB identifier with IANA name windows-1257.
+	//
+	// Microsoft https://www.iana.org/assignments/charset-reg/windows-1257
+	Windows1257 MIB = 2257
+
+	// Windows1258 is the MIB identifier with IANA name windows-1258.
+	//
+	// Microsoft https://www.iana.org/assignments/charset-reg/windows-1258
+	Windows1258 MIB = 2258
+
+	// TIS620 is the MIB identifier with IANA name TIS-620.
+	//
+	// Thai Industrial Standards Institute (TISI)
+	TIS620 MIB = 2259
+
+	// CP50220 is the MIB identifier with IANA name CP50220.
+	//
+	// See https://www.iana.org/assignments/charset-reg/CP50220
+	CP50220 MIB = 2260
+)
diff --git a/vendor/golang.org/x/text/encoding/internal/internal.go b/vendor/golang.org/x/text/encoding/internal/internal.go
new file mode 100644
index 00000000..75a5fd16
--- /dev/null
+++ b/vendor/golang.org/x/text/encoding/internal/internal.go
@@ -0,0 +1,75 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package internal contains code that is shared among encoding implementations.
+package internal
+
+import (
+	"golang.org/x/text/encoding"
+	"golang.org/x/text/encoding/internal/identifier"
+	"golang.org/x/text/transform"
+)
+
+// Encoding is an implementation of the Encoding interface that adds the String
+// and ID methods to an existing encoding.
+type Encoding struct {
+	encoding.Encoding
+	Name string
+	MIB  identifier.MIB
+}
+
+// _ verifies that Encoding implements identifier.Interface.
+var _ identifier.Interface = (*Encoding)(nil)
+
+func (e *Encoding) String() string {
+	return e.Name
+}
+
+func (e *Encoding) ID() (mib identifier.MIB, other string) {
+	return e.MIB, ""
+}
+
+// SimpleEncoding is an Encoding that combines two Transformers.
+type SimpleEncoding struct {
+	Decoder transform.Transformer
+	Encoder transform.Transformer
+}
+
+func (e *SimpleEncoding) NewDecoder() *encoding.Decoder {
+	return &encoding.Decoder{Transformer: e.Decoder}
+}
+
+func (e *SimpleEncoding) NewEncoder() *encoding.Encoder {
+	return &encoding.Encoder{Transformer: e.Encoder}
+}
+
+// FuncEncoding is an Encoding that combines two functions returning a new
+// Transformer.
+type FuncEncoding struct {
+	Decoder func() transform.Transformer
+	Encoder func() transform.Transformer
+}
+
+func (e FuncEncoding) NewDecoder() *encoding.Decoder {
+	return &encoding.Decoder{Transformer: e.Decoder()}
+}
+
+func (e FuncEncoding) NewEncoder() *encoding.Encoder {
+	return &encoding.Encoder{Transformer: e.Encoder()}
+}
+
+// A RepertoireError indicates a rune is not in the repertoire of a destination
+// encoding. It is associated with an encoding-specific suggested replacement
+// byte.
+type RepertoireError byte
+
+// Error implements the error interface.
+func (r RepertoireError) Error() string {
+	return "encoding: rune not supported by encoding."
+}
+
+// Replacement returns the replacement string associated with this error.
+func (r RepertoireError) Replacement() byte { return byte(r) }
+
+var ErrASCIIReplacement = RepertoireError(encoding.ASCIISub)
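The helpers above are consumed by the concrete encodings vendored elsewhere in golang.org/x/text (the package is internal, so it cannot be imported from outside that module). Below is a minimal sketch of how such a package might assemble an encoding.Encoding from them; the package name, the "X-Example" charset name, the zero MIB value, and the transform.Nop placeholders are illustrative assumptions, not part of this patch.

```go
package sketch // would have to live under golang.org/x/text/ to import these internal packages

import (
	"golang.org/x/text/encoding"
	"golang.org/x/text/encoding/internal"
	"golang.org/x/text/encoding/internal/identifier"
	"golang.org/x/text/transform"
)

// exampleEnc wires a decoder/encoder pair into an encoding.Encoding that also
// reports a name (String) and a MIB identifier (ID).
var exampleEnc encoding.Encoding = &internal.Encoding{
	Encoding: &internal.SimpleEncoding{
		Decoder: transform.Nop, // placeholder transformers, for illustration only
		Encoder: transform.Nop,
	},
	Name: "X-Example",       // hypothetical charset name
	MIB:  identifier.MIB(0), // real encodings use a registered identifier
}

// lazyEnc uses FuncEncoding, which builds a fresh Transformer per call and so
// suits stateful decoders that must not be shared between users.
var lazyEnc encoding.Encoding = internal.FuncEncoding{
	Decoder: func() transform.Transformer { return transform.Nop },
	Encoder: func() transform.Transformer { return transform.Nop },
}
```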
diff --git a/vendor/golang.org/x/text/encoding/unicode/override.go b/vendor/golang.org/x/text/encoding/unicode/override.go
new file mode 100644
index 00000000..35d62fcc
--- /dev/null
+++ b/vendor/golang.org/x/text/encoding/unicode/override.go
@@ -0,0 +1,82 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unicode
+
+import (
+	"golang.org/x/text/transform"
+)
+
+// BOMOverride returns a new decoder transformer that is identical to fallback,
+// except that the presence of a Byte Order Mark at the start of the input
+// causes it to switch to the corresponding Unicode decoding. It will only
+// consider BOMs for UTF-8, UTF-16BE, and UTF-16LE.
+//
+// This differs from using ExpectBOM by allowing a BOM to switch to UTF-8, not
+// just UTF-16 variants, and allowing falling back to any encoding scheme.
+//
+// This technique is recommended by the W3C for use in HTML 5: "For
+// compatibility with deployed content, the byte order mark (also known as BOM)
+// is considered more authoritative than anything else."
+// http://www.w3.org/TR/encoding/#specification-hooks
+//
+// Using BOMOverride is mostly intended for use cases where the first characters
+// of a fallback encoding are known to not be a BOM, for example, for valid HTML
+// and most encodings.
+func BOMOverride(fallback transform.Transformer) transform.Transformer {
+	// TODO: possibly allow a variadic argument of unicode encodings to allow
+	// specifying details of which fallbacks are supported as well as
+	// specifying the details of the implementations. This would also allow for
+	// support for UTF-32, which should not be supported by default.
+	return &bomOverride{fallback: fallback}
+}
+
+type bomOverride struct {
+	fallback transform.Transformer
+	current  transform.Transformer
+}
+
+func (d *bomOverride) Reset() {
+	d.current = nil
+	d.fallback.Reset()
+}
+
+var (
+	// TODO: we could use decode functions here, instead of allocating a new
+	// decoder on every NewDecoder as IgnoreBOM decoders can be stateless.
+	utf16le = UTF16(LittleEndian, IgnoreBOM)
+	utf16be = UTF16(BigEndian, IgnoreBOM)
+)
+
+const utf8BOM = "\ufeff"
+
+func (d *bomOverride) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+	if d.current != nil {
+		return d.current.Transform(dst, src, atEOF)
+	}
+	if len(src) < 3 && !atEOF {
+		return 0, 0, transform.ErrShortSrc
+	}
+	d.current = d.fallback
+	bomSize := 0
+	if len(src) >= 2 {
+		if src[0] == 0xFF && src[1] == 0xFE {
+			d.current = utf16le.NewDecoder()
+			bomSize = 2
+		} else if src[0] == 0xFE && src[1] == 0xFF {
+			d.current = utf16be.NewDecoder()
+			bomSize = 2
+		} else if len(src) >= 3 &&
+			src[0] == utf8BOM[0] &&
+			src[1] == utf8BOM[1] &&
+			src[2] == utf8BOM[2] {
+			d.current = transform.Nop
+			bomSize = 3
+		}
+	}
+	if bomSize < len(src) {
+		nDst, nSrc, err = d.current.Transform(dst, src[bomSize:], atEOF)
+	}
+	return nDst, nSrc + bomSize, err
+}
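A short usage sketch of BOMOverride, outside this patch: transform.Nop stands in for whatever fallback decoder a caller would normally supply, and the byte literal is an assumed UTF-16LE input.

```go
package main

import (
	"fmt"

	"golang.org/x/text/encoding/unicode"
	"golang.org/x/text/transform"
)

func main() {
	// The fallback is a plain pass-through; a leading BOM overrides it.
	dec := unicode.BOMOverride(transform.Nop)

	// "hi" encoded as UTF-16LE, preceded by a little-endian BOM (FF FE).
	in := []byte{0xFF, 0xFE, 'h', 0x00, 'i', 0x00}

	out, _, err := transform.Bytes(dec, in)
	fmt.Printf("%q %v\n", out, err) // "hi" <nil>
}
```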
diff --git a/vendor/golang.org/x/text/encoding/unicode/unicode.go b/vendor/golang.org/x/text/encoding/unicode/unicode.go
new file mode 100644
index 00000000..4850ff36
--- /dev/null
+++ b/vendor/golang.org/x/text/encoding/unicode/unicode.go
@@ -0,0 +1,434 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package unicode provides Unicode encodings such as UTF-16.
+package unicode // import "golang.org/x/text/encoding/unicode"
+
+import (
+	"errors"
+	"unicode/utf16"
+	"unicode/utf8"
+
+	"golang.org/x/text/encoding"
+	"golang.org/x/text/encoding/internal"
+	"golang.org/x/text/encoding/internal/identifier"
+	"golang.org/x/text/internal/utf8internal"
+	"golang.org/x/text/runes"
+	"golang.org/x/text/transform"
+)
+
+// TODO: I think the Transformers really should return errors on unmatched
+// surrogate pairs and odd numbers of bytes. This is not required by RFC 2781,
+// which leaves it open, but is suggested by WhatWG. It will allow for all error
+// modes as defined by WhatWG: fatal, HTML and Replacement. This would require
+// the introduction of some kind of error type for conveying the erroneous code
+// point.
+
+// UTF8 is the UTF-8 encoding.
+var UTF8 encoding.Encoding = utf8enc
+
+var utf8enc = &internal.Encoding{
+	&internal.SimpleEncoding{utf8Decoder{}, runes.ReplaceIllFormed()},
+	"UTF-8",
+	identifier.UTF8,
+}
+
+type utf8Decoder struct{ transform.NopResetter }
+
+func (utf8Decoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+	var pSrc int // point from which to start copy in src
+	var accept utf8internal.AcceptRange
+
+	// The decoder can only make the input larger, not smaller.
+	n := len(src)
+	if len(dst) < n {
+		err = transform.ErrShortDst
+		n = len(dst)
+		atEOF = false
+	}
+	for nSrc < n {
+		c := src[nSrc]
+		if c < utf8.RuneSelf {
+			nSrc++
+			continue
+		}
+		first := utf8internal.First[c]
+		size := int(first & utf8internal.SizeMask)
+		if first == utf8internal.FirstInvalid {
+			goto handleInvalid // invalid starter byte
+		}
+		accept = utf8internal.AcceptRanges[first>>utf8internal.AcceptShift]
+		if nSrc+size > n {
+			if !atEOF {
+				// We may stop earlier than necessary here if the short sequence
+				// has invalid bytes. Not checking for this simplifies the code
+				// and may avoid duplicate computations in certain conditions.
+				if err == nil {
+					err = transform.ErrShortSrc
+				}
+				break
+			}
+			// Determine the maximal subpart of an ill-formed subsequence.
+			switch {
+			case nSrc+1 >= n || src[nSrc+1] < accept.Lo || accept.Hi < src[nSrc+1]:
+				size = 1
+			case nSrc+2 >= n || src[nSrc+2] < utf8internal.LoCB || utf8internal.HiCB < src[nSrc+2]:
+				size = 2
+			default:
+				size = 3 // As we are short, the maximum is 3.
+			}
+			goto handleInvalid
+		}
+		if c = src[nSrc+1]; c < accept.Lo || accept.Hi < c {
+			size = 1
+			goto handleInvalid // invalid continuation byte
+		} else if size == 2 {
+		} else if c = src[nSrc+2]; c < utf8internal.LoCB || utf8internal.HiCB < c {
+			size = 2
+			goto handleInvalid // invalid continuation byte
+		} else if size == 3 {
+		} else if c = src[nSrc+3]; c < utf8internal.LoCB || utf8internal.HiCB < c {
+			size = 3
+			goto handleInvalid // invalid continuation byte
+		}
+		nSrc += size
+		continue
+
+	handleInvalid:
+		// Copy the scanned input so far.
+		nDst += copy(dst[nDst:], src[pSrc:nSrc])
+
+		// Append RuneError to the destination.
+		const runeError = "\ufffd"
+		if nDst+len(runeError) > len(dst) {
+			return nDst, nSrc, transform.ErrShortDst
+		}
+		nDst += copy(dst[nDst:], runeError)
+
+		// Skip the maximal subpart of an ill-formed subsequence according to
+		// the W3C standard way instead of the Go way. This Transform is
+		// probably the only place in the text repo where it is warranted.
+		nSrc += size
+		pSrc = nSrc
+
+		// Recompute the maximum source length.
+		if sz := len(dst) - nDst; sz < len(src)-nSrc {
+			err = transform.ErrShortDst
+			n = nSrc + sz
+			atEOF = false
+		}
+	}
+	return nDst + copy(dst[nDst:], src[pSrc:nSrc]), nSrc, err
+}
+
+// UTF16 returns a UTF-16 Encoding for the given default endianness and byte
+// order mark (BOM) policy.
+//
+// When decoding from UTF-16 to UTF-8, if the BOMPolicy is IgnoreBOM then
+// neither BOMs U+FEFF nor noncharacters U+FFFE in the input stream will affect
+// the endianness used for decoding, and will instead be output as their
+// standard UTF-8 encodings: "\xef\xbb\xbf" and "\xef\xbf\xbe". If the BOMPolicy
+// is UseBOM or ExpectBOM, a starting BOM is not written to the UTF-8 output.
+// Instead, it overrides the default endianness e for the remainder of the
+// transformation. Any subsequent BOMs U+FEFF or noncharacters U+FFFE will not
+// affect the endianness used, and will instead be output as their standard
+// UTF-8 encodings. For UseBOM, if there is no starting BOM, it will proceed
+// with the default Endianness. For ExpectBOM, in that case, the transformation
+// will return early with an ErrMissingBOM error.
+//
+// When encoding from UTF-8 to UTF-16, a BOM will be inserted at the start of
+// the output if the BOMPolicy is UseBOM or ExpectBOM. Otherwise, a BOM will not
+// be inserted. The UTF-8 input does not need to contain a BOM.
+//
+// There is no concept of a 'native' endianness. If the UTF-16 data is produced
+// and consumed in a greater context that implies a certain endianness, use
+// IgnoreBOM. Otherwise, use ExpectBOM and always produce and consume a BOM.
+//
+// In the language of https://www.unicode.org/faq/utf_bom.html#bom10, IgnoreBOM
+// corresponds to "Where the precise type of the data stream is known... the
+// BOM should not be used" and ExpectBOM corresponds to "A particular
+// protocol... may require use of the BOM".
+func UTF16(e Endianness, b BOMPolicy) encoding.Encoding {
+	return utf16Encoding{config{e, b}, mibValue[e][b&bomMask]}
+}
+
+// mibValue maps Endianness and BOMPolicy settings to MIB constants. Note that
+// some configurations map to the same MIB identifier. RFC 2781 has requirements
+// and recommendations. Some of the "configurations" are merely recommendations,
+// so multiple configurations could match.
+var mibValue = map[Endianness][numBOMValues]identifier.MIB{
+	BigEndian: [numBOMValues]identifier.MIB{
+		IgnoreBOM: identifier.UTF16BE,
+		UseBOM:    identifier.UTF16, // BigEndian default is preferred by RFC 2781.
+		// TODO: acceptBOM | strictBOM would map to UTF16BE as well.
+	},
+	LittleEndian: [numBOMValues]identifier.MIB{
+		IgnoreBOM: identifier.UTF16LE,
+		UseBOM:    identifier.UTF16, // LittleEndian default is allowed and preferred on Windows.
+		// TODO: acceptBOM | strictBOM would map to UTF16LE as well.
+	},
+	// ExpectBOM is not widely used and has no valid MIB identifier.
+}
+
+// All lists a configuration for each IANA-defined UTF-16 variant.
+var All = []encoding.Encoding{
+	UTF8,
+	UTF16(BigEndian, UseBOM),
+	UTF16(BigEndian, IgnoreBOM),
+	UTF16(LittleEndian, IgnoreBOM),
+}
+
+// BOMPolicy is a UTF-16 encoding's byte order mark policy.
+type BOMPolicy uint8
+
+const (
+	writeBOM   BOMPolicy = 0x01
+	acceptBOM  BOMPolicy = 0x02
+	requireBOM BOMPolicy = 0x04
+	bomMask    BOMPolicy = 0x07
+
+	// HACK: numBOMValues == 8 triggers a bug in the 1.4 compiler (cannot have a
+	// map of an array of length 8 of a type that is also used as a key or value
+	// in another map). See golang.org/issue/11354.
+	// TODO: consider changing this value back to 8 if the use of 1.4.* has
+	// been minimized.
+	numBOMValues = 8 + 1
+
+	// IgnoreBOM means to ignore any byte order marks.
+	IgnoreBOM BOMPolicy = 0
+	// Common and RFC 2781-compliant interpretation for UTF-16BE/LE.
+
+	// UseBOM means that the UTF-16 form may start with a byte order mark, which
+	// will be used to override the default encoding.
+	UseBOM BOMPolicy = writeBOM | acceptBOM
+	// Common and RFC 2781-compliant interpretation for UTF-16.
+
+	// ExpectBOM means that the UTF-16 form must start with a byte order mark,
+	// which will be used to override the default encoding.
+	ExpectBOM BOMPolicy = writeBOM | acceptBOM | requireBOM
+	// Used in Java as Unicode (not to be confused with Java's UTF-16) and
+	// ICU's UTF-16,version=1. Not compliant with RFC 2781.
+
+	// TODO (maybe): strictBOM: BOM must match Endianness. This would allow:
+	// - UTF-16(B|L)E,version=1: writeBOM | acceptBOM | requireBOM | strictBOM
+	//    (UnicodeBig and UnicodeLittle in Java)
+	// - RFC 2781-compliant, but less common interpretation for UTF-16(B|L)E:
+	//    acceptBOM | strictBOM (e.g. assigned to CheckBOM).
+	// This addition would be consistent with supporting ExpectBOM.
+)
+
+// Endianness is a UTF-16 encoding's default endianness.
+type Endianness bool
+
+const (
+	// BigEndian is UTF-16BE.
+	BigEndian Endianness = false
+	// LittleEndian is UTF-16LE.
+	LittleEndian Endianness = true
+)
+
+// ErrMissingBOM means that decoding UTF-16 input with ExpectBOM did not find a
+// starting byte order mark.
+var ErrMissingBOM = errors.New("encoding: missing byte order mark")
+
+type utf16Encoding struct {
+	config
+	mib identifier.MIB
+}
+
+type config struct {
+	endianness Endianness
+	bomPolicy  BOMPolicy
+}
+
+func (u utf16Encoding) NewDecoder() *encoding.Decoder {
+	return &encoding.Decoder{Transformer: &utf16Decoder{
+		initial: u.config,
+		current: u.config,
+	}}
+}
+
+func (u utf16Encoding) NewEncoder() *encoding.Encoder {
+	return &encoding.Encoder{Transformer: &utf16Encoder{
+		endianness:       u.endianness,
+		initialBOMPolicy: u.bomPolicy,
+		currentBOMPolicy: u.bomPolicy,
+	}}
+}
+
+func (u utf16Encoding) ID() (mib identifier.MIB, other string) {
+	return u.mib, ""
+}
+
+func (u utf16Encoding) String() string {
+	e, b := "B", ""
+	if u.endianness == LittleEndian {
+		e = "L"
+	}
+	switch u.bomPolicy {
+	case ExpectBOM:
+		b = "Expect"
+	case UseBOM:
+		b = "Use"
+	case IgnoreBOM:
+		b = "Ignore"
+	}
+	return "UTF-16" + e + "E (" + b + " BOM)"
+}
+
+type utf16Decoder struct {
+	initial config
+	current config
+}
+
+func (u *utf16Decoder) Reset() {
+	u.current = u.initial
+}
+
+func (u *utf16Decoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+	if len(src) == 0 {
+		if atEOF && u.current.bomPolicy&requireBOM != 0 {
+			return 0, 0, ErrMissingBOM
+		}
+		return 0, 0, nil
+	}
+	if u.current.bomPolicy&acceptBOM != 0 {
+		if len(src) < 2 {
+			return 0, 0, transform.ErrShortSrc
+		}
+		switch {
+		case src[0] == 0xfe && src[1] == 0xff:
+			u.current.endianness = BigEndian
+			nSrc = 2
+		case src[0] == 0xff && src[1] == 0xfe:
+			u.current.endianness = LittleEndian
+			nSrc = 2
+		default:
+			if u.current.bomPolicy&requireBOM != 0 {
+				return 0, 0, ErrMissingBOM
+			}
+		}
+		u.current.bomPolicy = IgnoreBOM
+	}
+
+	var r rune
+	var dSize, sSize int
+	for nSrc < len(src) {
+		if nSrc+1 < len(src) {
+			x := uint16(src[nSrc+0])<<8 | uint16(src[nSrc+1])
+			if u.current.endianness == LittleEndian {
+				x = x>>8 | x<<8
+			}
+			r, sSize = rune(x), 2
+			if utf16.IsSurrogate(r) {
+				if nSrc+3 < len(src) {
+					x = uint16(src[nSrc+2])<<8 | uint16(src[nSrc+3])
+					if u.current.endianness == LittleEndian {
+						x = x>>8 | x<<8
+					}
+					// Save for next iteration if it is not a high surrogate.
+					if isHighSurrogate(rune(x)) {
+						r, sSize = utf16.DecodeRune(r, rune(x)), 4
+					}
+				} else if !atEOF {
+					err = transform.ErrShortSrc
+					break
+				}
+			}
+			if dSize = utf8.RuneLen(r); dSize < 0 {
+				r, dSize = utf8.RuneError, 3
+			}
+		} else if atEOF {
+			// Single trailing byte.
+			r, dSize, sSize = utf8.RuneError, 3, 1
+		} else {
+			err = transform.ErrShortSrc
+			break
+		}
+		if nDst+dSize > len(dst) {
+			err = transform.ErrShortDst
+			break
+		}
+		nDst += utf8.EncodeRune(dst[nDst:], r)
+		nSrc += sSize
+	}
+	return nDst, nSrc, err
+}
+
+func isHighSurrogate(r rune) bool {
+	return 0xDC00 <= r && r <= 0xDFFF
+}
+
+type utf16Encoder struct {
+	endianness       Endianness
+	initialBOMPolicy BOMPolicy
+	currentBOMPolicy BOMPolicy
+}
+
+func (u *utf16Encoder) Reset() {
+	u.currentBOMPolicy = u.initialBOMPolicy
+}
+
+func (u *utf16Encoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+	if u.currentBOMPolicy&writeBOM != 0 {
+		if len(dst) < 2 {
+			return 0, 0, transform.ErrShortDst
+		}
+		dst[0], dst[1] = 0xfe, 0xff
+		u.currentBOMPolicy = IgnoreBOM
+		nDst = 2
+	}
+
+	r, size := rune(0), 0
+	for nSrc < len(src) {
+		r = rune(src[nSrc])
+
+		// Decode a 1-byte rune.
+		if r < utf8.RuneSelf {
+			size = 1
+
+		} else {
+			// Decode a multi-byte rune.
+			r, size = utf8.DecodeRune(src[nSrc:])
+			if size == 1 {
+				// All valid runes of size 1 (those below utf8.RuneSelf) were
+				// handled above. We have invalid UTF-8 or we haven't seen the
+				// full character yet.
+				if !atEOF && !utf8.FullRune(src[nSrc:]) {
+					err = transform.ErrShortSrc
+					break
+				}
+			}
+		}
+
+		if r <= 0xffff {
+			if nDst+2 > len(dst) {
+				err = transform.ErrShortDst
+				break
+			}
+			dst[nDst+0] = uint8(r >> 8)
+			dst[nDst+1] = uint8(r)
+			nDst += 2
+		} else {
+			if nDst+4 > len(dst) {
+				err = transform.ErrShortDst
+				break
+			}
+			r1, r2 := utf16.EncodeRune(r)
+			dst[nDst+0] = uint8(r1 >> 8)
+			dst[nDst+1] = uint8(r1)
+			dst[nDst+2] = uint8(r2 >> 8)
+			dst[nDst+3] = uint8(r2)
+			nDst += 4
+		}
+		nSrc += size
+	}
+
+	if u.endianness == LittleEndian {
+		for i := 0; i < nDst; i += 2 {
+			dst[i], dst[i+1] = dst[i+1], dst[i]
+		}
+	}
+	return nDst, nSrc, err
+}
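A round-trip sketch, outside this patch, of the UTF16 constructor and the BOM handling described in its doc comment; the sample string is an assumption for illustration.

```go
package main

import (
	"fmt"

	"golang.org/x/text/encoding/unicode"
	"golang.org/x/text/transform"
)

func main() {
	e := unicode.UTF16(unicode.BigEndian, unicode.UseBOM)

	// Encoding writes a BOM followed by big-endian code units.
	enc, _, err := transform.Bytes(e.NewEncoder(), []byte("hi"))
	fmt.Printf("% X %v\n", enc, err) // FE FF 00 68 00 69 <nil>

	// Decoding consumes the BOM and converts back to UTF-8.
	dec, _, err := transform.Bytes(e.NewDecoder(), enc)
	fmt.Printf("%q %v\n", dec, err) // "hi" <nil>
}
```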
diff --git a/vendor/golang.org/x/text/internal/utf8internal/utf8internal.go b/vendor/golang.org/x/text/internal/utf8internal/utf8internal.go
new file mode 100644
index 00000000..575cea87
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/utf8internal/utf8internal.go
@@ -0,0 +1,87 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package utf8internal contains low-level utf8-related constants, tables, etc.
+// that are used internally by the text package.
+package utf8internal
+
+// The default lowest and highest continuation byte.
+const (
+	LoCB = 0x80 // 1000 0000
+	HiCB = 0xBF // 1011 1111
+)
+
+// Constants related to getting information of first bytes of UTF-8 sequences.
+const (
+	// ASCII identifies a UTF-8 byte as ASCII.
+	ASCII = as
+
+	// FirstInvalid indicates a byte is invalid as a first byte of a UTF-8
+	// sequence.
+	FirstInvalid = xx
+
+	// SizeMask is a mask for the size bits. Use x&SizeMask to get the size.
+	SizeMask = 7
+
+	// AcceptShift is the right-shift count for the first byte info byte to get
+	// the index into the AcceptRanges table. See AcceptRanges.
+	AcceptShift = 4
+
+	// The names of these constants are chosen to give nice alignment in the
+	// table below. The first nibble is an index into acceptRanges or F for
+	// special one-byte cases. The second nibble is the Rune length or the
+	// Status for the special one-byte case.
+	xx = 0xF1 // invalid: size 1
+	as = 0xF0 // ASCII: size 1
+	s1 = 0x02 // accept 0, size 2
+	s2 = 0x13 // accept 1, size 3
+	s3 = 0x03 // accept 0, size 3
+	s4 = 0x23 // accept 2, size 3
+	s5 = 0x34 // accept 3, size 4
+	s6 = 0x04 // accept 0, size 4
+	s7 = 0x44 // accept 4, size 4
+)
+
+// First is information about the first byte in a UTF-8 sequence.
+var First = [256]uint8{
+	//   1   2   3   4   5   6   7   8   9   A   B   C   D   E   F
+	as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x00-0x0F
+	as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x10-0x1F
+	as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x20-0x2F
+	as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x30-0x3F
+	as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x40-0x4F
+	as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x50-0x5F
+	as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x60-0x6F
+	as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x70-0x7F
+	//   1   2   3   4   5   6   7   8   9   A   B   C   D   E   F
+	xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x80-0x8F
+	xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x90-0x9F
+	xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xA0-0xAF
+	xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xB0-0xBF
+	xx, xx, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xC0-0xCF
+	s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xD0-0xDF
+	s2, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s4, s3, s3, // 0xE0-0xEF
+	s5, s6, s6, s6, s7, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xF0-0xFF
+}
+
+// AcceptRange gives the range of valid values for the second byte in a UTF-8
+// sequence for any value for First that is not ASCII or FirstInvalid.
+type AcceptRange struct {
+	Lo uint8 // lowest value for second byte.
+	Hi uint8 // highest value for second byte.
+}
+
+// AcceptRanges is a slice of AcceptRange values. For a given byte sequence b
+//
+//		AcceptRanges[First[b[0]]>>AcceptShift]
+//
+// will give the value of AcceptRange for the multi-byte UTF-8 sequence starting
+// at b[0].
+var AcceptRanges = [...]AcceptRange{
+	0: {LoCB, HiCB},
+	1: {0xA0, HiCB},
+	2: {LoCB, 0x9F},
+	3: {0x90, HiCB},
+	4: {LoCB, 0x8F},
+}
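A minimal sketch, written as if it were another file in this internal package (it is not importable from outside golang.org/x/text), of how the tables above combine to classify the start of a UTF-8 sequence; the helper name firstByteInfo is an assumption for illustration.

```go
package utf8internal

// firstByteInfo reports the expected sequence length for starter byte b0 and
// whether b1 is an acceptable second byte, using First, SizeMask, AcceptShift
// and AcceptRanges as defined above.
func firstByteInfo(b0, b1 byte) (size int, secondOK bool) {
	fb := First[b0]
	if fb == ASCII || fb == FirstInvalid {
		return 1, false // a single byte: plain ASCII or an invalid starter
	}
	ar := AcceptRanges[fb>>AcceptShift]
	return int(fb & SizeMask), ar.Lo <= b1 && b1 <= ar.Hi
}
```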
diff --git a/vendor/golang.org/x/text/runes/cond.go b/vendor/golang.org/x/text/runes/cond.go
new file mode 100644
index 00000000..df7aa02d
--- /dev/null
+++ b/vendor/golang.org/x/text/runes/cond.go
@@ -0,0 +1,187 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runes
+
+import (
+	"unicode/utf8"
+
+	"golang.org/x/text/transform"
+)
+
+// Note: below we pass invalid UTF-8 to the tIn and tNotIn transformers as is.
+// This is done for various reasons:
+// - To retain the semantics of the Nop transformer: if input is passed to a Nop
+//   one would expect it to be unchanged.
+// - It would be very expensive to pass a converted RuneError to a transformer:
+//   a transformer might need more source bytes after RuneError, meaning that
+//   the only way to pass it safely is to create a new buffer and manage the
+//   intermingling of RuneErrors and normal input.
+// - Many transformers leave ill-formed UTF-8 as is, so this is not
+//   inconsistent. Generally ill-formed UTF-8 is only replaced if it is a
+//   logical consequence of the operation (as for Map) or if it otherwise would
+//   pose security concerns (as for Remove).
+// - An alternative would be to return an error on ill-formed UTF-8, but this
+//   would be inconsistent with other operations.
+
+// If returns a transformer that applies tIn to consecutive runes for which
+// s.Contains(r) and tNotIn to consecutive runes for which !s.Contains(r). Reset
+// is called on tIn and tNotIn at the start of each run. A nil value passed as
+// tIn or tNotIn is replaced by the Nop transformer. Invalid UTF-8 is translated
+// to RuneError to determine which transformer to apply, but is passed as is to
+// the respective transformer.
+func If(s Set, tIn, tNotIn transform.Transformer) Transformer {
+	if tIn == nil && tNotIn == nil {
+		return Transformer{transform.Nop}
+	}
+	if tIn == nil {
+		tIn = transform.Nop
+	}
+	if tNotIn == nil {
+		tNotIn = transform.Nop
+	}
+	sIn, ok := tIn.(transform.SpanningTransformer)
+	if !ok {
+		sIn = dummySpan{tIn}
+	}
+	sNotIn, ok := tNotIn.(transform.SpanningTransformer)
+	if !ok {
+		sNotIn = dummySpan{tNotIn}
+	}
+
+	a := &cond{
+		tIn:    sIn,
+		tNotIn: sNotIn,
+		f:      s.Contains,
+	}
+	a.Reset()
+	return Transformer{a}
+}
+
+type dummySpan struct{ transform.Transformer }
+
+func (d dummySpan) Span(src []byte, atEOF bool) (n int, err error) {
+	return 0, transform.ErrEndOfSpan
+}
+
+type cond struct {
+	tIn, tNotIn transform.SpanningTransformer
+	f           func(rune) bool
+	check       func(rune) bool               // current check to perform
+	t           transform.SpanningTransformer // current transformer to use
+}
+
+// Reset implements transform.Transformer.
+func (t *cond) Reset() {
+	t.check = t.is
+	t.t = t.tIn
+	t.t.Reset() // notIn will be reset on first usage.
+}
+
+func (t *cond) is(r rune) bool {
+	if t.f(r) {
+		return true
+	}
+	t.check = t.isNot
+	t.t = t.tNotIn
+	t.tNotIn.Reset()
+	return false
+}
+
+func (t *cond) isNot(r rune) bool {
+	if !t.f(r) {
+		return true
+	}
+	t.check = t.is
+	t.t = t.tIn
+	t.tIn.Reset()
+	return false
+}
+
+// This implementation of Span doesn't help all that much, but it needs to be
+// there to satisfy this package's Transformer interface.
+// TODO: there is certainly room for improvement, though. For example, if
+// t.t == transform.Nop (which will be a common occurrence) it would save a
+// bundle to special-case that loop.
+func (t *cond) Span(src []byte, atEOF bool) (n int, err error) {
+	p := 0
+	for n < len(src) && err == nil {
+		// Don't process too much at a time as the Spanner that will be
+		// called on this block may terminate early.
+		const maxChunk = 4096
+		max := len(src)
+		if v := n + maxChunk; v < max {
+			max = v
+		}
+		atEnd := false
+		size := 0
+		current := t.t
+		for ; p < max; p += size {
+			r := rune(src[p])
+			if r < utf8.RuneSelf {
+				size = 1
+			} else if r, size = utf8.DecodeRune(src[p:]); size == 1 {
+				if !atEOF && !utf8.FullRune(src[p:]) {
+					err = transform.ErrShortSrc
+					break
+				}
+			}
+			if !t.check(r) {
+				// The next rune will be the start of a new run.
+				atEnd = true
+				break
+			}
+		}
+		n2, err2 := current.Span(src[n:p], atEnd || (atEOF && p == len(src)))
+		n += n2
+		if err2 != nil {
+			return n, err2
+		}
+		// At this point either err != nil or t.check will pass for the rune at p.
+		p = n + size
+	}
+	return n, err
+}
+
+func (t *cond) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+	p := 0
+	for nSrc < len(src) && err == nil {
+		// Don't process too much at a time, as the work might be wasted if the
+		// destination buffer isn't large enough to hold the result or a
+		// transform returns an error early.
+		const maxChunk = 4096
+		max := len(src)
+		if n := nSrc + maxChunk; n < len(src) {
+			max = n
+		}
+		atEnd := false
+		size := 0
+		current := t.t
+		for ; p < max; p += size {
+			r := rune(src[p])
+			if r < utf8.RuneSelf {
+				size = 1
+			} else if r, size = utf8.DecodeRune(src[p:]); size == 1 {
+				if !atEOF && !utf8.FullRune(src[p:]) {
+					err = transform.ErrShortSrc
+					break
+				}
+			}
+			if !t.check(r) {
+				// The next rune will be the start of a new run.
+				atEnd = true
+				break
+			}
+		}
+		nDst2, nSrc2, err2 := current.Transform(dst[nDst:], src[nSrc:p], atEnd || (atEOF && p == len(src)))
+		nDst += nDst2
+		nSrc += nSrc2
+		if err2 != nil {
+			return nDst, nSrc, err2
+		}
+		// At this point either err != nil or t.check will pass for the rune at p.
+		p = nSrc + size
+	}
+	return nDst, nSrc, err
+}
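A short sketch of If in use, outside this patch; the predicate, the mapping, and the sample string are illustrative assumptions.

```go
package main

import (
	"fmt"
	"unicode"
	"unicode/utf8"

	"golang.org/x/text/runes"
)

func main() {
	// Uppercase only ASCII runes; passing nil for tNotIn leaves the rest alone.
	t := runes.If(
		runes.Predicate(func(r rune) bool { return r < utf8.RuneSelf }),
		runes.Map(unicode.ToUpper),
		nil,
	)
	fmt.Println(t.String("héllo")) // HéLLO
}
```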
diff --git a/vendor/golang.org/x/text/runes/runes.go b/vendor/golang.org/x/text/runes/runes.go
new file mode 100644
index 00000000..71933696
--- /dev/null
+++ b/vendor/golang.org/x/text/runes/runes.go
@@ -0,0 +1,355 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package runes provides transforms for UTF-8 encoded text.
+package runes // import "golang.org/x/text/runes"
+
+import (
+	"unicode"
+	"unicode/utf8"
+
+	"golang.org/x/text/transform"
+)
+
+// A Set is a collection of runes.
+type Set interface {
+	// Contains returns true if r is contained in the set.
+	Contains(r rune) bool
+}
+
+type setFunc func(rune) bool
+
+func (s setFunc) Contains(r rune) bool {
+	return s(r)
+}
+
+// Note: using funcs here instead of wrapping types results in cleaner
+// documentation and a smaller API.
+
+// In creates a Set with a Contains method that returns true for all runes in
+// the given RangeTable.
+func In(rt *unicode.RangeTable) Set {
+	return setFunc(func(r rune) bool { return unicode.Is(rt, r) })
+}
+
+// NotIn creates a Set with a Contains method that returns true for all runes not
+// in the given RangeTable.
+func NotIn(rt *unicode.RangeTable) Set {
+	return setFunc(func(r rune) bool { return !unicode.Is(rt, r) })
+}
+
+// Predicate creates a Set with a Contains method that returns f(r).
+func Predicate(f func(rune) bool) Set {
+	return setFunc(f)
+}
+
+// Transformer implements the transform.Transformer interface.
+type Transformer struct {
+	t transform.SpanningTransformer
+}
+
+func (t Transformer) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+	return t.t.Transform(dst, src, atEOF)
+}
+
+func (t Transformer) Span(b []byte, atEOF bool) (n int, err error) {
+	return t.t.Span(b, atEOF)
+}
+
+func (t Transformer) Reset() { t.t.Reset() }
+
+// Bytes returns a new byte slice with the result of converting b using t.  It
+// calls Reset on t. It returns nil if any error was found. This can only happen
+// if an error-producing Transformer is passed to If.
+func (t Transformer) Bytes(b []byte) []byte {
+	b, _, err := transform.Bytes(t, b)
+	if err != nil {
+		return nil
+	}
+	return b
+}
+
+// String returns a string with the result of converting s using t. It calls
+// Reset on t. It returns the empty string if any error was found. This can only
+// happen if an error-producing Transformer is passed to If.
+func (t Transformer) String(s string) string {
+	s, _, err := transform.String(t, s)
+	if err != nil {
+		return ""
+	}
+	return s
+}
+
+// TODO:
+// - Copy: copying strings and bytes in whole-rune units.
+// - Validation (maybe)
+// - Well-formed-ness (maybe)
+
+const runeErrorString = string(utf8.RuneError)
+
+// Remove returns a Transformer that removes runes r for which s.Contains(r).
+// Illegal input bytes are replaced by RuneError before being passed to s.Contains.
+func Remove(s Set) Transformer {
+	if f, ok := s.(setFunc); ok {
+		// This little trick cuts the running time of BenchmarkRemove for sets
+		// created by Predicate roughly in half.
+		// TODO: special-case RangeTables as well.
+		return Transformer{remove(f)}
+	}
+	return Transformer{remove(s.Contains)}
+}
+
+// TODO: remove transform.RemoveFunc.
+
+type remove func(r rune) bool
+
+func (remove) Reset() {}
+
+// Span implements transform.Spanner.
+func (t remove) Span(src []byte, atEOF bool) (n int, err error) {
+	for r, size := rune(0), 0; n < len(src); {
+		if r = rune(src[n]); r < utf8.RuneSelf {
+			size = 1
+		} else if r, size = utf8.DecodeRune(src[n:]); size == 1 {
+			// Invalid rune.
+			if !atEOF && !utf8.FullRune(src[n:]) {
+				err = transform.ErrShortSrc
+			} else {
+				err = transform.ErrEndOfSpan
+			}
+			break
+		}
+		if t(r) {
+			err = transform.ErrEndOfSpan
+			break
+		}
+		n += size
+	}
+	return
+}
+
+// Transform implements transform.Transformer.
+func (t remove) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+	for r, size := rune(0), 0; nSrc < len(src); {
+		if r = rune(src[nSrc]); r < utf8.RuneSelf {
+			size = 1
+		} else if r, size = utf8.DecodeRune(src[nSrc:]); size == 1 {
+			// Invalid rune.
+			if !atEOF && !utf8.FullRune(src[nSrc:]) {
+				err = transform.ErrShortSrc
+				break
+			}
+			// We replace illegal bytes with RuneError. Not doing so might
+			// otherwise turn a sequence of invalid UTF-8 into valid UTF-8.
+			// The resulting byte sequence may subsequently contain runes
+			// for which t(r) is true that were passed unnoticed.
+			if !t(utf8.RuneError) {
+				if nDst+3 > len(dst) {
+					err = transform.ErrShortDst
+					break
+				}
+				dst[nDst+0] = runeErrorString[0]
+				dst[nDst+1] = runeErrorString[1]
+				dst[nDst+2] = runeErrorString[2]
+				nDst += 3
+			}
+			nSrc++
+			continue
+		}
+		if t(r) {
+			nSrc += size
+			continue
+		}
+		if nDst+size > len(dst) {
+			err = transform.ErrShortDst
+			break
+		}
+		for i := 0; i < size; i++ {
+			dst[nDst] = src[nSrc]
+			nDst++
+			nSrc++
+		}
+	}
+	return
+}
+
+// Map returns a Transformer that maps the runes in the input using the given
+// mapping. Illegal bytes in the input are converted to utf8.RuneError before
+// being passed to the mapping func.
+func Map(mapping func(rune) rune) Transformer {
+	return Transformer{mapper(mapping)}
+}
+
+type mapper func(rune) rune
+
+func (mapper) Reset() {}
+
+// Span implements transform.Spanner.
+func (t mapper) Span(src []byte, atEOF bool) (n int, err error) {
+	for r, size := rune(0), 0; n < len(src); n += size {
+		if r = rune(src[n]); r < utf8.RuneSelf {
+			size = 1
+		} else if r, size = utf8.DecodeRune(src[n:]); size == 1 {
+			// Invalid rune.
+			if !atEOF && !utf8.FullRune(src[n:]) {
+				err = transform.ErrShortSrc
+			} else {
+				err = transform.ErrEndOfSpan
+			}
+			break
+		}
+		if t(r) != r {
+			err = transform.ErrEndOfSpan
+			break
+		}
+	}
+	return n, err
+}
+
+// Transform implements transform.Transformer.
+func (t mapper) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+	var replacement rune
+	var b [utf8.UTFMax]byte
+
+	for r, size := rune(0), 0; nSrc < len(src); {
+		if r = rune(src[nSrc]); r < utf8.RuneSelf {
+			if replacement = t(r); replacement < utf8.RuneSelf {
+				if nDst == len(dst) {
+					err = transform.ErrShortDst
+					break
+				}
+				dst[nDst] = byte(replacement)
+				nDst++
+				nSrc++
+				continue
+			}
+			size = 1
+		} else if r, size = utf8.DecodeRune(src[nSrc:]); size == 1 {
+			// Invalid rune.
+			if !atEOF && !utf8.FullRune(src[nSrc:]) {
+				err = transform.ErrShortSrc
+				break
+			}
+
+			if replacement = t(utf8.RuneError); replacement == utf8.RuneError {
+				if nDst+3 > len(dst) {
+					err = transform.ErrShortDst
+					break
+				}
+				dst[nDst+0] = runeErrorString[0]
+				dst[nDst+1] = runeErrorString[1]
+				dst[nDst+2] = runeErrorString[2]
+				nDst += 3
+				nSrc++
+				continue
+			}
+		} else if replacement = t(r); replacement == r {
+			if nDst+size > len(dst) {
+				err = transform.ErrShortDst
+				break
+			}
+			for i := 0; i < size; i++ {
+				dst[nDst] = src[nSrc]
+				nDst++
+				nSrc++
+			}
+			continue
+		}
+
+		n := utf8.EncodeRune(b[:], replacement)
+
+		if nDst+n > len(dst) {
+			err = transform.ErrShortDst
+			break
+		}
+		for i := 0; i < n; i++ {
+			dst[nDst] = b[i]
+			nDst++
+		}
+		nSrc += size
+	}
+	return
+}
+
+// ReplaceIllFormed returns a transformer that replaces all input bytes that are
+// not part of a well-formed UTF-8 code sequence with utf8.RuneError.
+func ReplaceIllFormed() Transformer {
+	return Transformer{&replaceIllFormed{}}
+}
+
+type replaceIllFormed struct{ transform.NopResetter }
+
+func (t replaceIllFormed) Span(src []byte, atEOF bool) (n int, err error) {
+	for n < len(src) {
+		// ASCII fast path.
+		if src[n] < utf8.RuneSelf {
+			n++
+			continue
+		}
+
+		r, size := utf8.DecodeRune(src[n:])
+
+		// Look for a valid non-ASCII rune.
+		if r != utf8.RuneError || size != 1 {
+			n += size
+			continue
+		}
+
+		// Look for short source data.
+		if !atEOF && !utf8.FullRune(src[n:]) {
+			err = transform.ErrShortSrc
+			break
+		}
+
+		// We have an invalid rune.
+		err = transform.ErrEndOfSpan
+		break
+	}
+	return n, err
+}
+
+func (t replaceIllFormed) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+	for nSrc < len(src) {
+		// ASCII fast path.
+		if r := src[nSrc]; r < utf8.RuneSelf {
+			if nDst == len(dst) {
+				err = transform.ErrShortDst
+				break
+			}
+			dst[nDst] = r
+			nDst++
+			nSrc++
+			continue
+		}
+
+		// Look for a valid non-ASCII rune.
+		if _, size := utf8.DecodeRune(src[nSrc:]); size != 1 {
+			if size != copy(dst[nDst:], src[nSrc:nSrc+size]) {
+				err = transform.ErrShortDst
+				break
+			}
+			nDst += size
+			nSrc += size
+			continue
+		}
+
+		// Look for short source data.
+		if !atEOF && !utf8.FullRune(src[nSrc:]) {
+			err = transform.ErrShortSrc
+			break
+		}
+
+		// We have an invalid rune.
+		if nDst+3 > len(dst) {
+			err = transform.ErrShortDst
+			break
+		}
+		dst[nDst+0] = runeErrorString[0]
+		dst[nDst+1] = runeErrorString[1]
+		dst[nDst+2] = runeErrorString[2]
+		nDst += 3
+		nSrc++
+	}
+	return nDst, nSrc, err
+}
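A brief usage sketch, outside this patch, of the exported transformers in this file; the sample strings are illustrative assumptions.

```go
package main

import (
	"fmt"
	"unicode"

	"golang.org/x/text/runes"
)

func main() {
	// Remove drops runes contained in the set: here, nonspacing marks (Mn).
	strip := runes.Remove(runes.In(unicode.Mn))
	fmt.Println(strip.String("e\u0301le\u0300ve")) // eleve

	// Map rewrites runes one by one; invalid bytes become utf8.RuneError first.
	digitsToX := runes.Map(func(r rune) rune {
		if unicode.IsDigit(r) {
			return 'x'
		}
		return r
	})
	fmt.Println(digitsToX.String("abc123")) // abcxxx

	// ReplaceIllFormed substitutes U+FFFD for bytes that are not valid UTF-8.
	fix := runes.ReplaceIllFormed()
	fmt.Println(fix.String("a\xffb")) // prints a, U+FFFD, b
}
```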
diff --git a/vendor/golang.org/x/text/width/kind_string.go b/vendor/golang.org/x/text/width/kind_string.go
new file mode 100644
index 00000000..dd3febd4
--- /dev/null
+++ b/vendor/golang.org/x/text/width/kind_string.go
@@ -0,0 +1,28 @@
+// Code generated by "stringer -type=Kind"; DO NOT EDIT.
+
+package width
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[Neutral-0]
+	_ = x[EastAsianAmbiguous-1]
+	_ = x[EastAsianWide-2]
+	_ = x[EastAsianNarrow-3]
+	_ = x[EastAsianFullwidth-4]
+	_ = x[EastAsianHalfwidth-5]
+}
+
+const _Kind_name = "NeutralEastAsianAmbiguousEastAsianWideEastAsianNarrowEastAsianFullwidthEastAsianHalfwidth"
+
+var _Kind_index = [...]uint8{0, 7, 25, 38, 53, 71, 89}
+
+func (i Kind) String() string {
+	if i < 0 || i >= Kind(len(_Kind_index)-1) {
+		return "Kind(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _Kind_name[_Kind_index[i]:_Kind_index[i+1]]
+}
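The generated String method is what makes Kind values readable when inspecting width properties; here is a small sketch, outside this patch, with illustrative sample runes whose printed kinds follow their East Asian Width classes.

```go
package main

import (
	"fmt"

	"golang.org/x/text/width"
)

func main() {
	for _, r := range []rune{'A', 'ア', 'ｱ', 'Ａ'} {
		p := width.LookupRune(r)
		fmt.Printf("%c\t%s\n", r, p.Kind())
	}
	// Expected kinds: EastAsianNarrow, EastAsianWide,
	// EastAsianHalfwidth, EastAsianFullwidth.
}
```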
diff --git a/vendor/golang.org/x/text/width/tables10.0.0.go b/vendor/golang.org/x/text/width/tables10.0.0.go
new file mode 100644
index 00000000..decb8e48
--- /dev/null
+++ b/vendor/golang.org/x/text/width/tables10.0.0.go
@@ -0,0 +1,1318 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+// +build go1.10,!go1.13
+
+package width
+
+// UnicodeVersion is the Unicode version from which the tables in this package are derived.
+const UnicodeVersion = "10.0.0"
+
+// lookup returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *widthTrie) lookup(s []byte) (v uint16, sz int) {
+	c0 := s[0]
+	switch {
+	case c0 < 0x80: // is ASCII
+		return widthValues[c0], 1
+	case c0 < 0xC2:
+		return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+	case c0 < 0xE0: // 2-byte UTF-8
+		if len(s) < 2 {
+			return 0, 0
+		}
+		i := widthIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c1), 2
+	case c0 < 0xF0: // 3-byte UTF-8
+		if len(s) < 3 {
+			return 0, 0
+		}
+		i := widthIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		o := uint32(i)<<6 + uint32(c1)
+		i = widthIndex[o]
+		c2 := s[2]
+		if c2 < 0x80 || 0xC0 <= c2 {
+			return 0, 2 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c2), 3
+	case c0 < 0xF8: // 4-byte UTF-8
+		if len(s) < 4 {
+			return 0, 0
+		}
+		i := widthIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		o := uint32(i)<<6 + uint32(c1)
+		i = widthIndex[o]
+		c2 := s[2]
+		if c2 < 0x80 || 0xC0 <= c2 {
+			return 0, 2 // Illegal UTF-8: not a continuation byte.
+		}
+		o = uint32(i)<<6 + uint32(c2)
+		i = widthIndex[o]
+		c3 := s[3]
+		if c3 < 0x80 || 0xC0 <= c3 {
+			return 0, 3 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c3), 4
+	}
+	// Illegal rune
+	return 0, 1
+}
+
+// lookupUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *widthTrie) lookupUnsafe(s []byte) uint16 {
+	c0 := s[0]
+	if c0 < 0x80 { // is ASCII
+		return widthValues[c0]
+	}
+	i := widthIndex[c0]
+	if c0 < 0xE0 { // 2-byte UTF-8
+		return t.lookupValue(uint32(i), s[1])
+	}
+	i = widthIndex[uint32(i)<<6+uint32(s[1])]
+	if c0 < 0xF0 { // 3-byte UTF-8
+		return t.lookupValue(uint32(i), s[2])
+	}
+	i = widthIndex[uint32(i)<<6+uint32(s[2])]
+	if c0 < 0xF8 { // 4-byte UTF-8
+		return t.lookupValue(uint32(i), s[3])
+	}
+	return 0
+}
+
+// lookupString returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *widthTrie) lookupString(s string) (v uint16, sz int) {
+	c0 := s[0]
+	switch {
+	case c0 < 0x80: // is ASCII
+		return widthValues[c0], 1
+	case c0 < 0xC2:
+		return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+	case c0 < 0xE0: // 2-byte UTF-8
+		if len(s) < 2 {
+			return 0, 0
+		}
+		i := widthIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c1), 2
+	case c0 < 0xF0: // 3-byte UTF-8
+		if len(s) < 3 {
+			return 0, 0
+		}
+		i := widthIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		o := uint32(i)<<6 + uint32(c1)
+		i = widthIndex[o]
+		c2 := s[2]
+		if c2 < 0x80 || 0xC0 <= c2 {
+			return 0, 2 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c2), 3
+	case c0 < 0xF8: // 4-byte UTF-8
+		if len(s) < 4 {
+			return 0, 0
+		}
+		i := widthIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		o := uint32(i)<<6 + uint32(c1)
+		i = widthIndex[o]
+		c2 := s[2]
+		if c2 < 0x80 || 0xC0 <= c2 {
+			return 0, 2 // Illegal UTF-8: not a continuation byte.
+		}
+		o = uint32(i)<<6 + uint32(c2)
+		i = widthIndex[o]
+		c3 := s[3]
+		if c3 < 0x80 || 0xC0 <= c3 {
+			return 0, 3 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c3), 4
+	}
+	// Illegal rune
+	return 0, 1
+}
+
+// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *widthTrie) lookupStringUnsafe(s string) uint16 {
+	c0 := s[0]
+	if c0 < 0x80 { // is ASCII
+		return widthValues[c0]
+	}
+	i := widthIndex[c0]
+	if c0 < 0xE0 { // 2-byte UTF-8
+		return t.lookupValue(uint32(i), s[1])
+	}
+	i = widthIndex[uint32(i)<<6+uint32(s[1])]
+	if c0 < 0xF0 { // 3-byte UTF-8
+		return t.lookupValue(uint32(i), s[2])
+	}
+	i = widthIndex[uint32(i)<<6+uint32(s[2])]
+	if c0 < 0xF8 { // 4-byte UTF-8
+		return t.lookupValue(uint32(i), s[3])
+	}
+	return 0
+}
+
+// widthTrie. Total size: 14336 bytes (14.00 KiB). Checksum: c59df54630d3dc4a.
+type widthTrie struct{}
+
+func newWidthTrie(i int) *widthTrie {
+	return &widthTrie{}
+}
+
+// lookupValue determines the type of block n and looks up the value for b.
+func (t *widthTrie) lookupValue(n uint32, b byte) uint16 {
+	switch {
+	default:
+		return uint16(widthValues[n<<6+uint32(b)])
+	}
+}
+
+// widthValues: 101 blocks, 6464 entries, 12928 bytes
+// The third block is the zero block.
+var widthValues = [6464]uint16{
+	// Block 0x0, offset 0x0
+	0x20: 0x6001, 0x21: 0x6002, 0x22: 0x6002, 0x23: 0x6002,
+	0x24: 0x6002, 0x25: 0x6002, 0x26: 0x6002, 0x27: 0x6002, 0x28: 0x6002, 0x29: 0x6002,
+	0x2a: 0x6002, 0x2b: 0x6002, 0x2c: 0x6002, 0x2d: 0x6002, 0x2e: 0x6002, 0x2f: 0x6002,
+	0x30: 0x6002, 0x31: 0x6002, 0x32: 0x6002, 0x33: 0x6002, 0x34: 0x6002, 0x35: 0x6002,
+	0x36: 0x6002, 0x37: 0x6002, 0x38: 0x6002, 0x39: 0x6002, 0x3a: 0x6002, 0x3b: 0x6002,
+	0x3c: 0x6002, 0x3d: 0x6002, 0x3e: 0x6002, 0x3f: 0x6002,
+	// Block 0x1, offset 0x40
+	0x40: 0x6003, 0x41: 0x6003, 0x42: 0x6003, 0x43: 0x6003, 0x44: 0x6003, 0x45: 0x6003,
+	0x46: 0x6003, 0x47: 0x6003, 0x48: 0x6003, 0x49: 0x6003, 0x4a: 0x6003, 0x4b: 0x6003,
+	0x4c: 0x6003, 0x4d: 0x6003, 0x4e: 0x6003, 0x4f: 0x6003, 0x50: 0x6003, 0x51: 0x6003,
+	0x52: 0x6003, 0x53: 0x6003, 0x54: 0x6003, 0x55: 0x6003, 0x56: 0x6003, 0x57: 0x6003,
+	0x58: 0x6003, 0x59: 0x6003, 0x5a: 0x6003, 0x5b: 0x6003, 0x5c: 0x6003, 0x5d: 0x6003,
+	0x5e: 0x6003, 0x5f: 0x6003, 0x60: 0x6004, 0x61: 0x6004, 0x62: 0x6004, 0x63: 0x6004,
+	0x64: 0x6004, 0x65: 0x6004, 0x66: 0x6004, 0x67: 0x6004, 0x68: 0x6004, 0x69: 0x6004,
+	0x6a: 0x6004, 0x6b: 0x6004, 0x6c: 0x6004, 0x6d: 0x6004, 0x6e: 0x6004, 0x6f: 0x6004,
+	0x70: 0x6004, 0x71: 0x6004, 0x72: 0x6004, 0x73: 0x6004, 0x74: 0x6004, 0x75: 0x6004,
+	0x76: 0x6004, 0x77: 0x6004, 0x78: 0x6004, 0x79: 0x6004, 0x7a: 0x6004, 0x7b: 0x6004,
+	0x7c: 0x6004, 0x7d: 0x6004, 0x7e: 0x6004,
+	// Block 0x2, offset 0x80
+	// Block 0x3, offset 0xc0
+	0xe1: 0x2000, 0xe2: 0x6005, 0xe3: 0x6005,
+	0xe4: 0x2000, 0xe5: 0x6006, 0xe6: 0x6005, 0xe7: 0x2000, 0xe8: 0x2000,
+	0xea: 0x2000, 0xec: 0x6007, 0xed: 0x2000, 0xee: 0x2000, 0xef: 0x6008,
+	0xf0: 0x2000, 0xf1: 0x2000, 0xf2: 0x2000, 0xf3: 0x2000, 0xf4: 0x2000,
+	0xf6: 0x2000, 0xf7: 0x2000, 0xf8: 0x2000, 0xf9: 0x2000, 0xfa: 0x2000,
+	0xfc: 0x2000, 0xfd: 0x2000, 0xfe: 0x2000, 0xff: 0x2000,
+	// Block 0x4, offset 0x100
+	0x106: 0x2000,
+	0x110: 0x2000,
+	0x117: 0x2000,
+	0x118: 0x2000,
+	0x11e: 0x2000, 0x11f: 0x2000, 0x120: 0x2000, 0x121: 0x2000,
+	0x126: 0x2000, 0x128: 0x2000, 0x129: 0x2000,
+	0x12a: 0x2000, 0x12c: 0x2000, 0x12d: 0x2000,
+	0x130: 0x2000, 0x132: 0x2000, 0x133: 0x2000,
+	0x137: 0x2000, 0x138: 0x2000, 0x139: 0x2000, 0x13a: 0x2000,
+	0x13c: 0x2000, 0x13e: 0x2000,
+	// Block 0x5, offset 0x140
+	0x141: 0x2000,
+	0x151: 0x2000,
+	0x153: 0x2000,
+	0x15b: 0x2000,
+	0x166: 0x2000, 0x167: 0x2000,
+	0x16b: 0x2000,
+	0x171: 0x2000, 0x172: 0x2000, 0x173: 0x2000,
+	0x178: 0x2000,
+	0x17f: 0x2000,
+	// Block 0x6, offset 0x180
+	0x180: 0x2000, 0x181: 0x2000, 0x182: 0x2000, 0x184: 0x2000,
+	0x188: 0x2000, 0x189: 0x2000, 0x18a: 0x2000, 0x18b: 0x2000,
+	0x18d: 0x2000,
+	0x192: 0x2000, 0x193: 0x2000,
+	0x1a6: 0x2000, 0x1a7: 0x2000,
+	0x1ab: 0x2000,
+	// Block 0x7, offset 0x1c0
+	0x1ce: 0x2000, 0x1d0: 0x2000,
+	0x1d2: 0x2000, 0x1d4: 0x2000, 0x1d6: 0x2000,
+	0x1d8: 0x2000, 0x1da: 0x2000, 0x1dc: 0x2000,
+	// Block 0x8, offset 0x200
+	0x211: 0x2000,
+	0x221: 0x2000,
+	// Block 0x9, offset 0x240
+	0x244: 0x2000,
+	0x247: 0x2000, 0x249: 0x2000, 0x24a: 0x2000, 0x24b: 0x2000,
+	0x24d: 0x2000, 0x250: 0x2000,
+	0x258: 0x2000, 0x259: 0x2000, 0x25a: 0x2000, 0x25b: 0x2000, 0x25d: 0x2000,
+	0x25f: 0x2000,
+	// Block 0xa, offset 0x280
+	0x280: 0x2000, 0x281: 0x2000, 0x282: 0x2000, 0x283: 0x2000, 0x284: 0x2000, 0x285: 0x2000,
+	0x286: 0x2000, 0x287: 0x2000, 0x288: 0x2000, 0x289: 0x2000, 0x28a: 0x2000, 0x28b: 0x2000,
+	0x28c: 0x2000, 0x28d: 0x2000, 0x28e: 0x2000, 0x28f: 0x2000, 0x290: 0x2000, 0x291: 0x2000,
+	0x292: 0x2000, 0x293: 0x2000, 0x294: 0x2000, 0x295: 0x2000, 0x296: 0x2000, 0x297: 0x2000,
+	0x298: 0x2000, 0x299: 0x2000, 0x29a: 0x2000, 0x29b: 0x2000, 0x29c: 0x2000, 0x29d: 0x2000,
+	0x29e: 0x2000, 0x29f: 0x2000, 0x2a0: 0x2000, 0x2a1: 0x2000, 0x2a2: 0x2000, 0x2a3: 0x2000,
+	0x2a4: 0x2000, 0x2a5: 0x2000, 0x2a6: 0x2000, 0x2a7: 0x2000, 0x2a8: 0x2000, 0x2a9: 0x2000,
+	0x2aa: 0x2000, 0x2ab: 0x2000, 0x2ac: 0x2000, 0x2ad: 0x2000, 0x2ae: 0x2000, 0x2af: 0x2000,
+	0x2b0: 0x2000, 0x2b1: 0x2000, 0x2b2: 0x2000, 0x2b3: 0x2000, 0x2b4: 0x2000, 0x2b5: 0x2000,
+	0x2b6: 0x2000, 0x2b7: 0x2000, 0x2b8: 0x2000, 0x2b9: 0x2000, 0x2ba: 0x2000, 0x2bb: 0x2000,
+	0x2bc: 0x2000, 0x2bd: 0x2000, 0x2be: 0x2000, 0x2bf: 0x2000,
+	// Block 0xb, offset 0x2c0
+	0x2c0: 0x2000, 0x2c1: 0x2000, 0x2c2: 0x2000, 0x2c3: 0x2000, 0x2c4: 0x2000, 0x2c5: 0x2000,
+	0x2c6: 0x2000, 0x2c7: 0x2000, 0x2c8: 0x2000, 0x2c9: 0x2000, 0x2ca: 0x2000, 0x2cb: 0x2000,
+	0x2cc: 0x2000, 0x2cd: 0x2000, 0x2ce: 0x2000, 0x2cf: 0x2000, 0x2d0: 0x2000, 0x2d1: 0x2000,
+	0x2d2: 0x2000, 0x2d3: 0x2000, 0x2d4: 0x2000, 0x2d5: 0x2000, 0x2d6: 0x2000, 0x2d7: 0x2000,
+	0x2d8: 0x2000, 0x2d9: 0x2000, 0x2da: 0x2000, 0x2db: 0x2000, 0x2dc: 0x2000, 0x2dd: 0x2000,
+	0x2de: 0x2000, 0x2df: 0x2000, 0x2e0: 0x2000, 0x2e1: 0x2000, 0x2e2: 0x2000, 0x2e3: 0x2000,
+	0x2e4: 0x2000, 0x2e5: 0x2000, 0x2e6: 0x2000, 0x2e7: 0x2000, 0x2e8: 0x2000, 0x2e9: 0x2000,
+	0x2ea: 0x2000, 0x2eb: 0x2000, 0x2ec: 0x2000, 0x2ed: 0x2000, 0x2ee: 0x2000, 0x2ef: 0x2000,
+	// Block 0xc, offset 0x300
+	0x311: 0x2000,
+	0x312: 0x2000, 0x313: 0x2000, 0x314: 0x2000, 0x315: 0x2000, 0x316: 0x2000, 0x317: 0x2000,
+	0x318: 0x2000, 0x319: 0x2000, 0x31a: 0x2000, 0x31b: 0x2000, 0x31c: 0x2000, 0x31d: 0x2000,
+	0x31e: 0x2000, 0x31f: 0x2000, 0x320: 0x2000, 0x321: 0x2000, 0x323: 0x2000,
+	0x324: 0x2000, 0x325: 0x2000, 0x326: 0x2000, 0x327: 0x2000, 0x328: 0x2000, 0x329: 0x2000,
+	0x331: 0x2000, 0x332: 0x2000, 0x333: 0x2000, 0x334: 0x2000, 0x335: 0x2000,
+	0x336: 0x2000, 0x337: 0x2000, 0x338: 0x2000, 0x339: 0x2000, 0x33a: 0x2000, 0x33b: 0x2000,
+	0x33c: 0x2000, 0x33d: 0x2000, 0x33e: 0x2000, 0x33f: 0x2000,
+	// Block 0xd, offset 0x340
+	0x340: 0x2000, 0x341: 0x2000, 0x343: 0x2000, 0x344: 0x2000, 0x345: 0x2000,
+	0x346: 0x2000, 0x347: 0x2000, 0x348: 0x2000, 0x349: 0x2000,
+	// Block 0xe, offset 0x380
+	0x381: 0x2000,
+	0x390: 0x2000, 0x391: 0x2000,
+	0x392: 0x2000, 0x393: 0x2000, 0x394: 0x2000, 0x395: 0x2000, 0x396: 0x2000, 0x397: 0x2000,
+	0x398: 0x2000, 0x399: 0x2000, 0x39a: 0x2000, 0x39b: 0x2000, 0x39c: 0x2000, 0x39d: 0x2000,
+	0x39e: 0x2000, 0x39f: 0x2000, 0x3a0: 0x2000, 0x3a1: 0x2000, 0x3a2: 0x2000, 0x3a3: 0x2000,
+	0x3a4: 0x2000, 0x3a5: 0x2000, 0x3a6: 0x2000, 0x3a7: 0x2000, 0x3a8: 0x2000, 0x3a9: 0x2000,
+	0x3aa: 0x2000, 0x3ab: 0x2000, 0x3ac: 0x2000, 0x3ad: 0x2000, 0x3ae: 0x2000, 0x3af: 0x2000,
+	0x3b0: 0x2000, 0x3b1: 0x2000, 0x3b2: 0x2000, 0x3b3: 0x2000, 0x3b4: 0x2000, 0x3b5: 0x2000,
+	0x3b6: 0x2000, 0x3b7: 0x2000, 0x3b8: 0x2000, 0x3b9: 0x2000, 0x3ba: 0x2000, 0x3bb: 0x2000,
+	0x3bc: 0x2000, 0x3bd: 0x2000, 0x3be: 0x2000, 0x3bf: 0x2000,
+	// Block 0xf, offset 0x3c0
+	0x3c0: 0x2000, 0x3c1: 0x2000, 0x3c2: 0x2000, 0x3c3: 0x2000, 0x3c4: 0x2000, 0x3c5: 0x2000,
+	0x3c6: 0x2000, 0x3c7: 0x2000, 0x3c8: 0x2000, 0x3c9: 0x2000, 0x3ca: 0x2000, 0x3cb: 0x2000,
+	0x3cc: 0x2000, 0x3cd: 0x2000, 0x3ce: 0x2000, 0x3cf: 0x2000, 0x3d1: 0x2000,
+	// Block 0x10, offset 0x400
+	0x400: 0x4000, 0x401: 0x4000, 0x402: 0x4000, 0x403: 0x4000, 0x404: 0x4000, 0x405: 0x4000,
+	0x406: 0x4000, 0x407: 0x4000, 0x408: 0x4000, 0x409: 0x4000, 0x40a: 0x4000, 0x40b: 0x4000,
+	0x40c: 0x4000, 0x40d: 0x4000, 0x40e: 0x4000, 0x40f: 0x4000, 0x410: 0x4000, 0x411: 0x4000,
+	0x412: 0x4000, 0x413: 0x4000, 0x414: 0x4000, 0x415: 0x4000, 0x416: 0x4000, 0x417: 0x4000,
+	0x418: 0x4000, 0x419: 0x4000, 0x41a: 0x4000, 0x41b: 0x4000, 0x41c: 0x4000, 0x41d: 0x4000,
+	0x41e: 0x4000, 0x41f: 0x4000, 0x420: 0x4000, 0x421: 0x4000, 0x422: 0x4000, 0x423: 0x4000,
+	0x424: 0x4000, 0x425: 0x4000, 0x426: 0x4000, 0x427: 0x4000, 0x428: 0x4000, 0x429: 0x4000,
+	0x42a: 0x4000, 0x42b: 0x4000, 0x42c: 0x4000, 0x42d: 0x4000, 0x42e: 0x4000, 0x42f: 0x4000,
+	0x430: 0x4000, 0x431: 0x4000, 0x432: 0x4000, 0x433: 0x4000, 0x434: 0x4000, 0x435: 0x4000,
+	0x436: 0x4000, 0x437: 0x4000, 0x438: 0x4000, 0x439: 0x4000, 0x43a: 0x4000, 0x43b: 0x4000,
+	0x43c: 0x4000, 0x43d: 0x4000, 0x43e: 0x4000, 0x43f: 0x4000,
+	// Block 0x11, offset 0x440
+	0x440: 0x4000, 0x441: 0x4000, 0x442: 0x4000, 0x443: 0x4000, 0x444: 0x4000, 0x445: 0x4000,
+	0x446: 0x4000, 0x447: 0x4000, 0x448: 0x4000, 0x449: 0x4000, 0x44a: 0x4000, 0x44b: 0x4000,
+	0x44c: 0x4000, 0x44d: 0x4000, 0x44e: 0x4000, 0x44f: 0x4000, 0x450: 0x4000, 0x451: 0x4000,
+	0x452: 0x4000, 0x453: 0x4000, 0x454: 0x4000, 0x455: 0x4000, 0x456: 0x4000, 0x457: 0x4000,
+	0x458: 0x4000, 0x459: 0x4000, 0x45a: 0x4000, 0x45b: 0x4000, 0x45c: 0x4000, 0x45d: 0x4000,
+	0x45e: 0x4000, 0x45f: 0x4000,
+	// Block 0x12, offset 0x480
+	0x490: 0x2000,
+	0x493: 0x2000, 0x494: 0x2000, 0x495: 0x2000, 0x496: 0x2000,
+	0x498: 0x2000, 0x499: 0x2000, 0x49c: 0x2000, 0x49d: 0x2000,
+	0x4a0: 0x2000, 0x4a1: 0x2000, 0x4a2: 0x2000,
+	0x4a4: 0x2000, 0x4a5: 0x2000, 0x4a6: 0x2000, 0x4a7: 0x2000,
+	0x4b0: 0x2000, 0x4b2: 0x2000, 0x4b3: 0x2000, 0x4b5: 0x2000,
+	0x4bb: 0x2000,
+	0x4be: 0x2000,
+	// Block 0x13, offset 0x4c0
+	0x4f4: 0x2000,
+	0x4ff: 0x2000,
+	// Block 0x14, offset 0x500
+	0x501: 0x2000, 0x502: 0x2000, 0x503: 0x2000, 0x504: 0x2000,
+	0x529: 0xa009,
+	0x52c: 0x2000,
+	// Block 0x15, offset 0x540
+	0x543: 0x2000, 0x545: 0x2000,
+	0x549: 0x2000,
+	0x553: 0x2000, 0x556: 0x2000,
+	0x561: 0x2000, 0x562: 0x2000,
+	0x566: 0x2000,
+	0x56b: 0x2000,
+	// Block 0x16, offset 0x580
+	0x593: 0x2000, 0x594: 0x2000,
+	0x59b: 0x2000, 0x59c: 0x2000, 0x59d: 0x2000,
+	0x59e: 0x2000, 0x5a0: 0x2000, 0x5a1: 0x2000, 0x5a2: 0x2000, 0x5a3: 0x2000,
+	0x5a4: 0x2000, 0x5a5: 0x2000, 0x5a6: 0x2000, 0x5a7: 0x2000, 0x5a8: 0x2000, 0x5a9: 0x2000,
+	0x5aa: 0x2000, 0x5ab: 0x2000,
+	0x5b0: 0x2000, 0x5b1: 0x2000, 0x5b2: 0x2000, 0x5b3: 0x2000, 0x5b4: 0x2000, 0x5b5: 0x2000,
+	0x5b6: 0x2000, 0x5b7: 0x2000, 0x5b8: 0x2000, 0x5b9: 0x2000,
+	// Block 0x17, offset 0x5c0
+	0x5c9: 0x2000,
+	0x5d0: 0x200a, 0x5d1: 0x200b,
+	0x5d2: 0x200a, 0x5d3: 0x200c, 0x5d4: 0x2000, 0x5d5: 0x2000, 0x5d6: 0x2000, 0x5d7: 0x2000,
+	0x5d8: 0x2000, 0x5d9: 0x2000,
+	0x5f8: 0x2000, 0x5f9: 0x2000,
+	// Block 0x18, offset 0x600
+	0x612: 0x2000, 0x614: 0x2000,
+	0x627: 0x2000,
+	// Block 0x19, offset 0x640
+	0x640: 0x2000, 0x642: 0x2000, 0x643: 0x2000,
+	0x647: 0x2000, 0x648: 0x2000, 0x64b: 0x2000,
+	0x64f: 0x2000, 0x651: 0x2000,
+	0x655: 0x2000,
+	0x65a: 0x2000, 0x65d: 0x2000,
+	0x65e: 0x2000, 0x65f: 0x2000, 0x660: 0x2000, 0x663: 0x2000,
+	0x665: 0x2000, 0x667: 0x2000, 0x668: 0x2000, 0x669: 0x2000,
+	0x66a: 0x2000, 0x66b: 0x2000, 0x66c: 0x2000, 0x66e: 0x2000,
+	0x674: 0x2000, 0x675: 0x2000,
+	0x676: 0x2000, 0x677: 0x2000,
+	0x67c: 0x2000, 0x67d: 0x2000,
+	// Block 0x1a, offset 0x680
+	0x688: 0x2000,
+	0x68c: 0x2000,
+	0x692: 0x2000,
+	0x6a0: 0x2000, 0x6a1: 0x2000,
+	0x6a4: 0x2000, 0x6a5: 0x2000, 0x6a6: 0x2000, 0x6a7: 0x2000,
+	0x6aa: 0x2000, 0x6ab: 0x2000, 0x6ae: 0x2000, 0x6af: 0x2000,
+	// Block 0x1b, offset 0x6c0
+	0x6c2: 0x2000, 0x6c3: 0x2000,
+	0x6c6: 0x2000, 0x6c7: 0x2000,
+	0x6d5: 0x2000,
+	0x6d9: 0x2000,
+	0x6e5: 0x2000,
+	0x6ff: 0x2000,
+	// Block 0x1c, offset 0x700
+	0x712: 0x2000,
+	0x71a: 0x4000, 0x71b: 0x4000,
+	0x729: 0x4000,
+	0x72a: 0x4000,
+	// Block 0x1d, offset 0x740
+	0x769: 0x4000,
+	0x76a: 0x4000, 0x76b: 0x4000, 0x76c: 0x4000,
+	0x770: 0x4000, 0x773: 0x4000,
+	// Block 0x1e, offset 0x780
+	0x7a0: 0x2000, 0x7a1: 0x2000, 0x7a2: 0x2000, 0x7a3: 0x2000,
+	0x7a4: 0x2000, 0x7a5: 0x2000, 0x7a6: 0x2000, 0x7a7: 0x2000, 0x7a8: 0x2000, 0x7a9: 0x2000,
+	0x7aa: 0x2000, 0x7ab: 0x2000, 0x7ac: 0x2000, 0x7ad: 0x2000, 0x7ae: 0x2000, 0x7af: 0x2000,
+	0x7b0: 0x2000, 0x7b1: 0x2000, 0x7b2: 0x2000, 0x7b3: 0x2000, 0x7b4: 0x2000, 0x7b5: 0x2000,
+	0x7b6: 0x2000, 0x7b7: 0x2000, 0x7b8: 0x2000, 0x7b9: 0x2000, 0x7ba: 0x2000, 0x7bb: 0x2000,
+	0x7bc: 0x2000, 0x7bd: 0x2000, 0x7be: 0x2000, 0x7bf: 0x2000,
+	// Block 0x1f, offset 0x7c0
+	0x7c0: 0x2000, 0x7c1: 0x2000, 0x7c2: 0x2000, 0x7c3: 0x2000, 0x7c4: 0x2000, 0x7c5: 0x2000,
+	0x7c6: 0x2000, 0x7c7: 0x2000, 0x7c8: 0x2000, 0x7c9: 0x2000, 0x7ca: 0x2000, 0x7cb: 0x2000,
+	0x7cc: 0x2000, 0x7cd: 0x2000, 0x7ce: 0x2000, 0x7cf: 0x2000, 0x7d0: 0x2000, 0x7d1: 0x2000,
+	0x7d2: 0x2000, 0x7d3: 0x2000, 0x7d4: 0x2000, 0x7d5: 0x2000, 0x7d6: 0x2000, 0x7d7: 0x2000,
+	0x7d8: 0x2000, 0x7d9: 0x2000, 0x7da: 0x2000, 0x7db: 0x2000, 0x7dc: 0x2000, 0x7dd: 0x2000,
+	0x7de: 0x2000, 0x7df: 0x2000, 0x7e0: 0x2000, 0x7e1: 0x2000, 0x7e2: 0x2000, 0x7e3: 0x2000,
+	0x7e4: 0x2000, 0x7e5: 0x2000, 0x7e6: 0x2000, 0x7e7: 0x2000, 0x7e8: 0x2000, 0x7e9: 0x2000,
+	0x7eb: 0x2000, 0x7ec: 0x2000, 0x7ed: 0x2000, 0x7ee: 0x2000, 0x7ef: 0x2000,
+	0x7f0: 0x2000, 0x7f1: 0x2000, 0x7f2: 0x2000, 0x7f3: 0x2000, 0x7f4: 0x2000, 0x7f5: 0x2000,
+	0x7f6: 0x2000, 0x7f7: 0x2000, 0x7f8: 0x2000, 0x7f9: 0x2000, 0x7fa: 0x2000, 0x7fb: 0x2000,
+	0x7fc: 0x2000, 0x7fd: 0x2000, 0x7fe: 0x2000, 0x7ff: 0x2000,
+	// Block 0x20, offset 0x800
+	0x800: 0x2000, 0x801: 0x2000, 0x802: 0x200d, 0x803: 0x2000, 0x804: 0x2000, 0x805: 0x2000,
+	0x806: 0x2000, 0x807: 0x2000, 0x808: 0x2000, 0x809: 0x2000, 0x80a: 0x2000, 0x80b: 0x2000,
+	0x80c: 0x2000, 0x80d: 0x2000, 0x80e: 0x2000, 0x80f: 0x2000, 0x810: 0x2000, 0x811: 0x2000,
+	0x812: 0x2000, 0x813: 0x2000, 0x814: 0x2000, 0x815: 0x2000, 0x816: 0x2000, 0x817: 0x2000,
+	0x818: 0x2000, 0x819: 0x2000, 0x81a: 0x2000, 0x81b: 0x2000, 0x81c: 0x2000, 0x81d: 0x2000,
+	0x81e: 0x2000, 0x81f: 0x2000, 0x820: 0x2000, 0x821: 0x2000, 0x822: 0x2000, 0x823: 0x2000,
+	0x824: 0x2000, 0x825: 0x2000, 0x826: 0x2000, 0x827: 0x2000, 0x828: 0x2000, 0x829: 0x2000,
+	0x82a: 0x2000, 0x82b: 0x2000, 0x82c: 0x2000, 0x82d: 0x2000, 0x82e: 0x2000, 0x82f: 0x2000,
+	0x830: 0x2000, 0x831: 0x2000, 0x832: 0x2000, 0x833: 0x2000, 0x834: 0x2000, 0x835: 0x2000,
+	0x836: 0x2000, 0x837: 0x2000, 0x838: 0x2000, 0x839: 0x2000, 0x83a: 0x2000, 0x83b: 0x2000,
+	0x83c: 0x2000, 0x83d: 0x2000, 0x83e: 0x2000, 0x83f: 0x2000,
+	// Block 0x21, offset 0x840
+	0x840: 0x2000, 0x841: 0x2000, 0x842: 0x2000, 0x843: 0x2000, 0x844: 0x2000, 0x845: 0x2000,
+	0x846: 0x2000, 0x847: 0x2000, 0x848: 0x2000, 0x849: 0x2000, 0x84a: 0x2000, 0x84b: 0x2000,
+	0x850: 0x2000, 0x851: 0x2000,
+	0x852: 0x2000, 0x853: 0x2000, 0x854: 0x2000, 0x855: 0x2000, 0x856: 0x2000, 0x857: 0x2000,
+	0x858: 0x2000, 0x859: 0x2000, 0x85a: 0x2000, 0x85b: 0x2000, 0x85c: 0x2000, 0x85d: 0x2000,
+	0x85e: 0x2000, 0x85f: 0x2000, 0x860: 0x2000, 0x861: 0x2000, 0x862: 0x2000, 0x863: 0x2000,
+	0x864: 0x2000, 0x865: 0x2000, 0x866: 0x2000, 0x867: 0x2000, 0x868: 0x2000, 0x869: 0x2000,
+	0x86a: 0x2000, 0x86b: 0x2000, 0x86c: 0x2000, 0x86d: 0x2000, 0x86e: 0x2000, 0x86f: 0x2000,
+	0x870: 0x2000, 0x871: 0x2000, 0x872: 0x2000, 0x873: 0x2000,
+	// Block 0x22, offset 0x880
+	0x880: 0x2000, 0x881: 0x2000, 0x882: 0x2000, 0x883: 0x2000, 0x884: 0x2000, 0x885: 0x2000,
+	0x886: 0x2000, 0x887: 0x2000, 0x888: 0x2000, 0x889: 0x2000, 0x88a: 0x2000, 0x88b: 0x2000,
+	0x88c: 0x2000, 0x88d: 0x2000, 0x88e: 0x2000, 0x88f: 0x2000,
+	0x892: 0x2000, 0x893: 0x2000, 0x894: 0x2000, 0x895: 0x2000,
+	0x8a0: 0x200e, 0x8a1: 0x2000, 0x8a3: 0x2000,
+	0x8a4: 0x2000, 0x8a5: 0x2000, 0x8a6: 0x2000, 0x8a7: 0x2000, 0x8a8: 0x2000, 0x8a9: 0x2000,
+	0x8b2: 0x2000, 0x8b3: 0x2000,
+	0x8b6: 0x2000, 0x8b7: 0x2000,
+	0x8bc: 0x2000, 0x8bd: 0x2000,
+	// Block 0x23, offset 0x8c0
+	0x8c0: 0x2000, 0x8c1: 0x2000,
+	0x8c6: 0x2000, 0x8c7: 0x2000, 0x8c8: 0x2000, 0x8cb: 0x200f,
+	0x8ce: 0x2000, 0x8cf: 0x2000, 0x8d0: 0x2000, 0x8d1: 0x2000,
+	0x8e2: 0x2000, 0x8e3: 0x2000,
+	0x8e4: 0x2000, 0x8e5: 0x2000,
+	0x8ef: 0x2000,
+	0x8fd: 0x4000, 0x8fe: 0x4000,
+	// Block 0x24, offset 0x900
+	0x905: 0x2000,
+	0x906: 0x2000, 0x909: 0x2000,
+	0x90e: 0x2000, 0x90f: 0x2000,
+	0x914: 0x4000, 0x915: 0x4000,
+	0x91c: 0x2000,
+	0x91e: 0x2000,
+	// Block 0x25, offset 0x940
+	0x940: 0x2000, 0x942: 0x2000,
+	0x948: 0x4000, 0x949: 0x4000, 0x94a: 0x4000, 0x94b: 0x4000,
+	0x94c: 0x4000, 0x94d: 0x4000, 0x94e: 0x4000, 0x94f: 0x4000, 0x950: 0x4000, 0x951: 0x4000,
+	0x952: 0x4000, 0x953: 0x4000,
+	0x960: 0x2000, 0x961: 0x2000, 0x963: 0x2000,
+	0x964: 0x2000, 0x965: 0x2000, 0x967: 0x2000, 0x968: 0x2000, 0x969: 0x2000,
+	0x96a: 0x2000, 0x96c: 0x2000, 0x96d: 0x2000, 0x96f: 0x2000,
+	0x97f: 0x4000,
+	// Block 0x26, offset 0x980
+	0x993: 0x4000,
+	0x99e: 0x2000, 0x99f: 0x2000, 0x9a1: 0x4000,
+	0x9aa: 0x4000, 0x9ab: 0x4000,
+	0x9bd: 0x4000, 0x9be: 0x4000, 0x9bf: 0x2000,
+	// Block 0x27, offset 0x9c0
+	0x9c4: 0x4000, 0x9c5: 0x4000,
+	0x9c6: 0x2000, 0x9c7: 0x2000, 0x9c8: 0x2000, 0x9c9: 0x2000, 0x9ca: 0x2000, 0x9cb: 0x2000,
+	0x9cc: 0x2000, 0x9cd: 0x2000, 0x9ce: 0x4000, 0x9cf: 0x2000, 0x9d0: 0x2000, 0x9d1: 0x2000,
+	0x9d2: 0x2000, 0x9d3: 0x2000, 0x9d4: 0x4000, 0x9d5: 0x2000, 0x9d6: 0x2000, 0x9d7: 0x2000,
+	0x9d8: 0x2000, 0x9d9: 0x2000, 0x9da: 0x2000, 0x9db: 0x2000, 0x9dc: 0x2000, 0x9dd: 0x2000,
+	0x9de: 0x2000, 0x9df: 0x2000, 0x9e0: 0x2000, 0x9e1: 0x2000, 0x9e3: 0x2000,
+	0x9e8: 0x2000, 0x9e9: 0x2000,
+	0x9ea: 0x4000, 0x9eb: 0x2000, 0x9ec: 0x2000, 0x9ed: 0x2000, 0x9ee: 0x2000, 0x9ef: 0x2000,
+	0x9f0: 0x2000, 0x9f1: 0x2000, 0x9f2: 0x4000, 0x9f3: 0x4000, 0x9f4: 0x2000, 0x9f5: 0x4000,
+	0x9f6: 0x2000, 0x9f7: 0x2000, 0x9f8: 0x2000, 0x9f9: 0x2000, 0x9fa: 0x4000, 0x9fb: 0x2000,
+	0x9fc: 0x2000, 0x9fd: 0x4000, 0x9fe: 0x2000, 0x9ff: 0x2000,
+	// Block 0x28, offset 0xa00
+	0xa05: 0x4000,
+	0xa0a: 0x4000, 0xa0b: 0x4000,
+	0xa28: 0x4000,
+	0xa3d: 0x2000,
+	// Block 0x29, offset 0xa40
+	0xa4c: 0x4000, 0xa4e: 0x4000,
+	0xa53: 0x4000, 0xa54: 0x4000, 0xa55: 0x4000, 0xa57: 0x4000,
+	0xa76: 0x2000, 0xa77: 0x2000, 0xa78: 0x2000, 0xa79: 0x2000, 0xa7a: 0x2000, 0xa7b: 0x2000,
+	0xa7c: 0x2000, 0xa7d: 0x2000, 0xa7e: 0x2000, 0xa7f: 0x2000,
+	// Block 0x2a, offset 0xa80
+	0xa95: 0x4000, 0xa96: 0x4000, 0xa97: 0x4000,
+	0xab0: 0x4000,
+	0xabf: 0x4000,
+	// Block 0x2b, offset 0xac0
+	0xae6: 0x6000, 0xae7: 0x6000, 0xae8: 0x6000, 0xae9: 0x6000,
+	0xaea: 0x6000, 0xaeb: 0x6000, 0xaec: 0x6000, 0xaed: 0x6000,
+	// Block 0x2c, offset 0xb00
+	0xb05: 0x6010,
+	0xb06: 0x6011,
+	// Block 0x2d, offset 0xb40
+	0xb5b: 0x4000, 0xb5c: 0x4000,
+	// Block 0x2e, offset 0xb80
+	0xb90: 0x4000,
+	0xb95: 0x4000, 0xb96: 0x2000, 0xb97: 0x2000,
+	0xb98: 0x2000, 0xb99: 0x2000,
+	// Block 0x2f, offset 0xbc0
+	0xbc0: 0x4000, 0xbc1: 0x4000, 0xbc2: 0x4000, 0xbc3: 0x4000, 0xbc4: 0x4000, 0xbc5: 0x4000,
+	0xbc6: 0x4000, 0xbc7: 0x4000, 0xbc8: 0x4000, 0xbc9: 0x4000, 0xbca: 0x4000, 0xbcb: 0x4000,
+	0xbcc: 0x4000, 0xbcd: 0x4000, 0xbce: 0x4000, 0xbcf: 0x4000, 0xbd0: 0x4000, 0xbd1: 0x4000,
+	0xbd2: 0x4000, 0xbd3: 0x4000, 0xbd4: 0x4000, 0xbd5: 0x4000, 0xbd6: 0x4000, 0xbd7: 0x4000,
+	0xbd8: 0x4000, 0xbd9: 0x4000, 0xbdb: 0x4000, 0xbdc: 0x4000, 0xbdd: 0x4000,
+	0xbde: 0x4000, 0xbdf: 0x4000, 0xbe0: 0x4000, 0xbe1: 0x4000, 0xbe2: 0x4000, 0xbe3: 0x4000,
+	0xbe4: 0x4000, 0xbe5: 0x4000, 0xbe6: 0x4000, 0xbe7: 0x4000, 0xbe8: 0x4000, 0xbe9: 0x4000,
+	0xbea: 0x4000, 0xbeb: 0x4000, 0xbec: 0x4000, 0xbed: 0x4000, 0xbee: 0x4000, 0xbef: 0x4000,
+	0xbf0: 0x4000, 0xbf1: 0x4000, 0xbf2: 0x4000, 0xbf3: 0x4000, 0xbf4: 0x4000, 0xbf5: 0x4000,
+	0xbf6: 0x4000, 0xbf7: 0x4000, 0xbf8: 0x4000, 0xbf9: 0x4000, 0xbfa: 0x4000, 0xbfb: 0x4000,
+	0xbfc: 0x4000, 0xbfd: 0x4000, 0xbfe: 0x4000, 0xbff: 0x4000,
+	// Block 0x30, offset 0xc00
+	0xc00: 0x4000, 0xc01: 0x4000, 0xc02: 0x4000, 0xc03: 0x4000, 0xc04: 0x4000, 0xc05: 0x4000,
+	0xc06: 0x4000, 0xc07: 0x4000, 0xc08: 0x4000, 0xc09: 0x4000, 0xc0a: 0x4000, 0xc0b: 0x4000,
+	0xc0c: 0x4000, 0xc0d: 0x4000, 0xc0e: 0x4000, 0xc0f: 0x4000, 0xc10: 0x4000, 0xc11: 0x4000,
+	0xc12: 0x4000, 0xc13: 0x4000, 0xc14: 0x4000, 0xc15: 0x4000, 0xc16: 0x4000, 0xc17: 0x4000,
+	0xc18: 0x4000, 0xc19: 0x4000, 0xc1a: 0x4000, 0xc1b: 0x4000, 0xc1c: 0x4000, 0xc1d: 0x4000,
+	0xc1e: 0x4000, 0xc1f: 0x4000, 0xc20: 0x4000, 0xc21: 0x4000, 0xc22: 0x4000, 0xc23: 0x4000,
+	0xc24: 0x4000, 0xc25: 0x4000, 0xc26: 0x4000, 0xc27: 0x4000, 0xc28: 0x4000, 0xc29: 0x4000,
+	0xc2a: 0x4000, 0xc2b: 0x4000, 0xc2c: 0x4000, 0xc2d: 0x4000, 0xc2e: 0x4000, 0xc2f: 0x4000,
+	0xc30: 0x4000, 0xc31: 0x4000, 0xc32: 0x4000, 0xc33: 0x4000,
+	// Block 0x31, offset 0xc40
+	0xc40: 0x4000, 0xc41: 0x4000, 0xc42: 0x4000, 0xc43: 0x4000, 0xc44: 0x4000, 0xc45: 0x4000,
+	0xc46: 0x4000, 0xc47: 0x4000, 0xc48: 0x4000, 0xc49: 0x4000, 0xc4a: 0x4000, 0xc4b: 0x4000,
+	0xc4c: 0x4000, 0xc4d: 0x4000, 0xc4e: 0x4000, 0xc4f: 0x4000, 0xc50: 0x4000, 0xc51: 0x4000,
+	0xc52: 0x4000, 0xc53: 0x4000, 0xc54: 0x4000, 0xc55: 0x4000,
+	0xc70: 0x4000, 0xc71: 0x4000, 0xc72: 0x4000, 0xc73: 0x4000, 0xc74: 0x4000, 0xc75: 0x4000,
+	0xc76: 0x4000, 0xc77: 0x4000, 0xc78: 0x4000, 0xc79: 0x4000, 0xc7a: 0x4000, 0xc7b: 0x4000,
+	// Block 0x32, offset 0xc80
+	0xc80: 0x9012, 0xc81: 0x4013, 0xc82: 0x4014, 0xc83: 0x4000, 0xc84: 0x4000, 0xc85: 0x4000,
+	0xc86: 0x4000, 0xc87: 0x4000, 0xc88: 0x4000, 0xc89: 0x4000, 0xc8a: 0x4000, 0xc8b: 0x4000,
+	0xc8c: 0x4015, 0xc8d: 0x4015, 0xc8e: 0x4000, 0xc8f: 0x4000, 0xc90: 0x4000, 0xc91: 0x4000,
+	0xc92: 0x4000, 0xc93: 0x4000, 0xc94: 0x4000, 0xc95: 0x4000, 0xc96: 0x4000, 0xc97: 0x4000,
+	0xc98: 0x4000, 0xc99: 0x4000, 0xc9a: 0x4000, 0xc9b: 0x4000, 0xc9c: 0x4000, 0xc9d: 0x4000,
+	0xc9e: 0x4000, 0xc9f: 0x4000, 0xca0: 0x4000, 0xca1: 0x4000, 0xca2: 0x4000, 0xca3: 0x4000,
+	0xca4: 0x4000, 0xca5: 0x4000, 0xca6: 0x4000, 0xca7: 0x4000, 0xca8: 0x4000, 0xca9: 0x4000,
+	0xcaa: 0x4000, 0xcab: 0x4000, 0xcac: 0x4000, 0xcad: 0x4000, 0xcae: 0x4000, 0xcaf: 0x4000,
+	0xcb0: 0x4000, 0xcb1: 0x4000, 0xcb2: 0x4000, 0xcb3: 0x4000, 0xcb4: 0x4000, 0xcb5: 0x4000,
+	0xcb6: 0x4000, 0xcb7: 0x4000, 0xcb8: 0x4000, 0xcb9: 0x4000, 0xcba: 0x4000, 0xcbb: 0x4000,
+	0xcbc: 0x4000, 0xcbd: 0x4000, 0xcbe: 0x4000,
+	// Block 0x33, offset 0xcc0
+	0xcc1: 0x4000, 0xcc2: 0x4000, 0xcc3: 0x4000, 0xcc4: 0x4000, 0xcc5: 0x4000,
+	0xcc6: 0x4000, 0xcc7: 0x4000, 0xcc8: 0x4000, 0xcc9: 0x4000, 0xcca: 0x4000, 0xccb: 0x4000,
+	0xccc: 0x4000, 0xccd: 0x4000, 0xcce: 0x4000, 0xccf: 0x4000, 0xcd0: 0x4000, 0xcd1: 0x4000,
+	0xcd2: 0x4000, 0xcd3: 0x4000, 0xcd4: 0x4000, 0xcd5: 0x4000, 0xcd6: 0x4000, 0xcd7: 0x4000,
+	0xcd8: 0x4000, 0xcd9: 0x4000, 0xcda: 0x4000, 0xcdb: 0x4000, 0xcdc: 0x4000, 0xcdd: 0x4000,
+	0xcde: 0x4000, 0xcdf: 0x4000, 0xce0: 0x4000, 0xce1: 0x4000, 0xce2: 0x4000, 0xce3: 0x4000,
+	0xce4: 0x4000, 0xce5: 0x4000, 0xce6: 0x4000, 0xce7: 0x4000, 0xce8: 0x4000, 0xce9: 0x4000,
+	0xcea: 0x4000, 0xceb: 0x4000, 0xcec: 0x4000, 0xced: 0x4000, 0xcee: 0x4000, 0xcef: 0x4000,
+	0xcf0: 0x4000, 0xcf1: 0x4000, 0xcf2: 0x4000, 0xcf3: 0x4000, 0xcf4: 0x4000, 0xcf5: 0x4000,
+	0xcf6: 0x4000, 0xcf7: 0x4000, 0xcf8: 0x4000, 0xcf9: 0x4000, 0xcfa: 0x4000, 0xcfb: 0x4000,
+	0xcfc: 0x4000, 0xcfd: 0x4000, 0xcfe: 0x4000, 0xcff: 0x4000,
+	// Block 0x34, offset 0xd00
+	0xd00: 0x4000, 0xd01: 0x4000, 0xd02: 0x4000, 0xd03: 0x4000, 0xd04: 0x4000, 0xd05: 0x4000,
+	0xd06: 0x4000, 0xd07: 0x4000, 0xd08: 0x4000, 0xd09: 0x4000, 0xd0a: 0x4000, 0xd0b: 0x4000,
+	0xd0c: 0x4000, 0xd0d: 0x4000, 0xd0e: 0x4000, 0xd0f: 0x4000, 0xd10: 0x4000, 0xd11: 0x4000,
+	0xd12: 0x4000, 0xd13: 0x4000, 0xd14: 0x4000, 0xd15: 0x4000, 0xd16: 0x4000,
+	0xd19: 0x4016, 0xd1a: 0x4017, 0xd1b: 0x4000, 0xd1c: 0x4000, 0xd1d: 0x4000,
+	0xd1e: 0x4000, 0xd1f: 0x4000, 0xd20: 0x4000, 0xd21: 0x4018, 0xd22: 0x4019, 0xd23: 0x401a,
+	0xd24: 0x401b, 0xd25: 0x401c, 0xd26: 0x401d, 0xd27: 0x401e, 0xd28: 0x401f, 0xd29: 0x4020,
+	0xd2a: 0x4021, 0xd2b: 0x4022, 0xd2c: 0x4000, 0xd2d: 0x4010, 0xd2e: 0x4000, 0xd2f: 0x4023,
+	0xd30: 0x4000, 0xd31: 0x4024, 0xd32: 0x4000, 0xd33: 0x4025, 0xd34: 0x4000, 0xd35: 0x4026,
+	0xd36: 0x4000, 0xd37: 0x401a, 0xd38: 0x4000, 0xd39: 0x4027, 0xd3a: 0x4000, 0xd3b: 0x4028,
+	0xd3c: 0x4000, 0xd3d: 0x4020, 0xd3e: 0x4000, 0xd3f: 0x4029,
+	// Block 0x35, offset 0xd40
+	0xd40: 0x4000, 0xd41: 0x402a, 0xd42: 0x4000, 0xd43: 0x402b, 0xd44: 0x402c, 0xd45: 0x4000,
+	0xd46: 0x4017, 0xd47: 0x4000, 0xd48: 0x402d, 0xd49: 0x4000, 0xd4a: 0x402e, 0xd4b: 0x402f,
+	0xd4c: 0x4030, 0xd4d: 0x4017, 0xd4e: 0x4016, 0xd4f: 0x4017, 0xd50: 0x4000, 0xd51: 0x4000,
+	0xd52: 0x4031, 0xd53: 0x4000, 0xd54: 0x4000, 0xd55: 0x4031, 0xd56: 0x4000, 0xd57: 0x4000,
+	0xd58: 0x4032, 0xd59: 0x4000, 0xd5a: 0x4000, 0xd5b: 0x4032, 0xd5c: 0x4000, 0xd5d: 0x4000,
+	0xd5e: 0x4033, 0xd5f: 0x402e, 0xd60: 0x4034, 0xd61: 0x4035, 0xd62: 0x4034, 0xd63: 0x4036,
+	0xd64: 0x4037, 0xd65: 0x4024, 0xd66: 0x4035, 0xd67: 0x4025, 0xd68: 0x4038, 0xd69: 0x4038,
+	0xd6a: 0x4039, 0xd6b: 0x4039, 0xd6c: 0x403a, 0xd6d: 0x403a, 0xd6e: 0x4000, 0xd6f: 0x4035,
+	0xd70: 0x4000, 0xd71: 0x4000, 0xd72: 0x403b, 0xd73: 0x403c, 0xd74: 0x4000, 0xd75: 0x4000,
+	0xd76: 0x4000, 0xd77: 0x4000, 0xd78: 0x4000, 0xd79: 0x4000, 0xd7a: 0x4000, 0xd7b: 0x403d,
+	0xd7c: 0x401c, 0xd7d: 0x4000, 0xd7e: 0x4000, 0xd7f: 0x4000,
+	// Block 0x36, offset 0xd80
+	0xd85: 0x4000,
+	0xd86: 0x4000, 0xd87: 0x4000, 0xd88: 0x4000, 0xd89: 0x4000, 0xd8a: 0x4000, 0xd8b: 0x4000,
+	0xd8c: 0x4000, 0xd8d: 0x4000, 0xd8e: 0x4000, 0xd8f: 0x4000, 0xd90: 0x4000, 0xd91: 0x4000,
+	0xd92: 0x4000, 0xd93: 0x4000, 0xd94: 0x4000, 0xd95: 0x4000, 0xd96: 0x4000, 0xd97: 0x4000,
+	0xd98: 0x4000, 0xd99: 0x4000, 0xd9a: 0x4000, 0xd9b: 0x4000, 0xd9c: 0x4000, 0xd9d: 0x4000,
+	0xd9e: 0x4000, 0xd9f: 0x4000, 0xda0: 0x4000, 0xda1: 0x4000, 0xda2: 0x4000, 0xda3: 0x4000,
+	0xda4: 0x4000, 0xda5: 0x4000, 0xda6: 0x4000, 0xda7: 0x4000, 0xda8: 0x4000, 0xda9: 0x4000,
+	0xdaa: 0x4000, 0xdab: 0x4000, 0xdac: 0x4000, 0xdad: 0x4000, 0xdae: 0x4000,
+	0xdb1: 0x403e, 0xdb2: 0x403e, 0xdb3: 0x403e, 0xdb4: 0x403e, 0xdb5: 0x403e,
+	0xdb6: 0x403e, 0xdb7: 0x403e, 0xdb8: 0x403e, 0xdb9: 0x403e, 0xdba: 0x403e, 0xdbb: 0x403e,
+	0xdbc: 0x403e, 0xdbd: 0x403e, 0xdbe: 0x403e, 0xdbf: 0x403e,
+	// Block 0x37, offset 0xdc0
+	0xdc0: 0x4037, 0xdc1: 0x4037, 0xdc2: 0x4037, 0xdc3: 0x4037, 0xdc4: 0x4037, 0xdc5: 0x4037,
+	0xdc6: 0x4037, 0xdc7: 0x4037, 0xdc8: 0x4037, 0xdc9: 0x4037, 0xdca: 0x4037, 0xdcb: 0x4037,
+	0xdcc: 0x4037, 0xdcd: 0x4037, 0xdce: 0x4037, 0xdcf: 0x400e, 0xdd0: 0x403f, 0xdd1: 0x4040,
+	0xdd2: 0x4041, 0xdd3: 0x4040, 0xdd4: 0x403f, 0xdd5: 0x4042, 0xdd6: 0x4043, 0xdd7: 0x4044,
+	0xdd8: 0x4040, 0xdd9: 0x4041, 0xdda: 0x4040, 0xddb: 0x4045, 0xddc: 0x4009, 0xddd: 0x4045,
+	0xdde: 0x4046, 0xddf: 0x4045, 0xde0: 0x4047, 0xde1: 0x400b, 0xde2: 0x400a, 0xde3: 0x400c,
+	0xde4: 0x4048, 0xde5: 0x4000, 0xde6: 0x4000, 0xde7: 0x4000, 0xde8: 0x4000, 0xde9: 0x4000,
+	0xdea: 0x4000, 0xdeb: 0x4000, 0xdec: 0x4000, 0xded: 0x4000, 0xdee: 0x4000, 0xdef: 0x4000,
+	0xdf0: 0x4000, 0xdf1: 0x4000, 0xdf2: 0x4000, 0xdf3: 0x4000, 0xdf4: 0x4000, 0xdf5: 0x4000,
+	0xdf6: 0x4000, 0xdf7: 0x4000, 0xdf8: 0x4000, 0xdf9: 0x4000, 0xdfa: 0x4000, 0xdfb: 0x4000,
+	0xdfc: 0x4000, 0xdfd: 0x4000, 0xdfe: 0x4000, 0xdff: 0x4000,
+	// Block 0x38, offset 0xe00
+	0xe00: 0x4000, 0xe01: 0x4000, 0xe02: 0x4000, 0xe03: 0x4000, 0xe04: 0x4000, 0xe05: 0x4000,
+	0xe06: 0x4000, 0xe07: 0x4000, 0xe08: 0x4000, 0xe09: 0x4000, 0xe0a: 0x4000, 0xe0b: 0x4000,
+	0xe0c: 0x4000, 0xe0d: 0x4000, 0xe0e: 0x4000, 0xe10: 0x4000, 0xe11: 0x4000,
+	0xe12: 0x4000, 0xe13: 0x4000, 0xe14: 0x4000, 0xe15: 0x4000, 0xe16: 0x4000, 0xe17: 0x4000,
+	0xe18: 0x4000, 0xe19: 0x4000, 0xe1a: 0x4000, 0xe1b: 0x4000, 0xe1c: 0x4000, 0xe1d: 0x4000,
+	0xe1e: 0x4000, 0xe1f: 0x4000, 0xe20: 0x4000, 0xe21: 0x4000, 0xe22: 0x4000, 0xe23: 0x4000,
+	0xe24: 0x4000, 0xe25: 0x4000, 0xe26: 0x4000, 0xe27: 0x4000, 0xe28: 0x4000, 0xe29: 0x4000,
+	0xe2a: 0x4000, 0xe2b: 0x4000, 0xe2c: 0x4000, 0xe2d: 0x4000, 0xe2e: 0x4000, 0xe2f: 0x4000,
+	0xe30: 0x4000, 0xe31: 0x4000, 0xe32: 0x4000, 0xe33: 0x4000, 0xe34: 0x4000, 0xe35: 0x4000,
+	0xe36: 0x4000, 0xe37: 0x4000, 0xe38: 0x4000, 0xe39: 0x4000, 0xe3a: 0x4000,
+	// Block 0x39, offset 0xe40
+	0xe40: 0x4000, 0xe41: 0x4000, 0xe42: 0x4000, 0xe43: 0x4000, 0xe44: 0x4000, 0xe45: 0x4000,
+	0xe46: 0x4000, 0xe47: 0x4000, 0xe48: 0x4000, 0xe49: 0x4000, 0xe4a: 0x4000, 0xe4b: 0x4000,
+	0xe4c: 0x4000, 0xe4d: 0x4000, 0xe4e: 0x4000, 0xe4f: 0x4000, 0xe50: 0x4000, 0xe51: 0x4000,
+	0xe52: 0x4000, 0xe53: 0x4000, 0xe54: 0x4000, 0xe55: 0x4000, 0xe56: 0x4000, 0xe57: 0x4000,
+	0xe58: 0x4000, 0xe59: 0x4000, 0xe5a: 0x4000, 0xe5b: 0x4000, 0xe5c: 0x4000, 0xe5d: 0x4000,
+	0xe5e: 0x4000, 0xe5f: 0x4000, 0xe60: 0x4000, 0xe61: 0x4000, 0xe62: 0x4000, 0xe63: 0x4000,
+	0xe70: 0x4000, 0xe71: 0x4000, 0xe72: 0x4000, 0xe73: 0x4000, 0xe74: 0x4000, 0xe75: 0x4000,
+	0xe76: 0x4000, 0xe77: 0x4000, 0xe78: 0x4000, 0xe79: 0x4000, 0xe7a: 0x4000, 0xe7b: 0x4000,
+	0xe7c: 0x4000, 0xe7d: 0x4000, 0xe7e: 0x4000, 0xe7f: 0x4000,
+	// Block 0x3a, offset 0xe80
+	0xe80: 0x4000, 0xe81: 0x4000, 0xe82: 0x4000, 0xe83: 0x4000, 0xe84: 0x4000, 0xe85: 0x4000,
+	0xe86: 0x4000, 0xe87: 0x4000, 0xe88: 0x4000, 0xe89: 0x4000, 0xe8a: 0x4000, 0xe8b: 0x4000,
+	0xe8c: 0x4000, 0xe8d: 0x4000, 0xe8e: 0x4000, 0xe8f: 0x4000, 0xe90: 0x4000, 0xe91: 0x4000,
+	0xe92: 0x4000, 0xe93: 0x4000, 0xe94: 0x4000, 0xe95: 0x4000, 0xe96: 0x4000, 0xe97: 0x4000,
+	0xe98: 0x4000, 0xe99: 0x4000, 0xe9a: 0x4000, 0xe9b: 0x4000, 0xe9c: 0x4000, 0xe9d: 0x4000,
+	0xe9e: 0x4000, 0xea0: 0x4000, 0xea1: 0x4000, 0xea2: 0x4000, 0xea3: 0x4000,
+	0xea4: 0x4000, 0xea5: 0x4000, 0xea6: 0x4000, 0xea7: 0x4000, 0xea8: 0x4000, 0xea9: 0x4000,
+	0xeaa: 0x4000, 0xeab: 0x4000, 0xeac: 0x4000, 0xead: 0x4000, 0xeae: 0x4000, 0xeaf: 0x4000,
+	0xeb0: 0x4000, 0xeb1: 0x4000, 0xeb2: 0x4000, 0xeb3: 0x4000, 0xeb4: 0x4000, 0xeb5: 0x4000,
+	0xeb6: 0x4000, 0xeb7: 0x4000, 0xeb8: 0x4000, 0xeb9: 0x4000, 0xeba: 0x4000, 0xebb: 0x4000,
+	0xebc: 0x4000, 0xebd: 0x4000, 0xebe: 0x4000, 0xebf: 0x4000,
+	// Block 0x3b, offset 0xec0
+	0xec0: 0x4000, 0xec1: 0x4000, 0xec2: 0x4000, 0xec3: 0x4000, 0xec4: 0x4000, 0xec5: 0x4000,
+	0xec6: 0x4000, 0xec7: 0x4000, 0xec8: 0x2000, 0xec9: 0x2000, 0xeca: 0x2000, 0xecb: 0x2000,
+	0xecc: 0x2000, 0xecd: 0x2000, 0xece: 0x2000, 0xecf: 0x2000, 0xed0: 0x4000, 0xed1: 0x4000,
+	0xed2: 0x4000, 0xed3: 0x4000, 0xed4: 0x4000, 0xed5: 0x4000, 0xed6: 0x4000, 0xed7: 0x4000,
+	0xed8: 0x4000, 0xed9: 0x4000, 0xeda: 0x4000, 0xedb: 0x4000, 0xedc: 0x4000, 0xedd: 0x4000,
+	0xede: 0x4000, 0xedf: 0x4000, 0xee0: 0x4000, 0xee1: 0x4000, 0xee2: 0x4000, 0xee3: 0x4000,
+	0xee4: 0x4000, 0xee5: 0x4000, 0xee6: 0x4000, 0xee7: 0x4000, 0xee8: 0x4000, 0xee9: 0x4000,
+	0xeea: 0x4000, 0xeeb: 0x4000, 0xeec: 0x4000, 0xeed: 0x4000, 0xeee: 0x4000, 0xeef: 0x4000,
+	0xef0: 0x4000, 0xef1: 0x4000, 0xef2: 0x4000, 0xef3: 0x4000, 0xef4: 0x4000, 0xef5: 0x4000,
+	0xef6: 0x4000, 0xef7: 0x4000, 0xef8: 0x4000, 0xef9: 0x4000, 0xefa: 0x4000, 0xefb: 0x4000,
+	0xefc: 0x4000, 0xefd: 0x4000, 0xefe: 0x4000, 0xeff: 0x4000,
+	// Block 0x3c, offset 0xf00
+	0xf00: 0x4000, 0xf01: 0x4000, 0xf02: 0x4000, 0xf03: 0x4000, 0xf04: 0x4000, 0xf05: 0x4000,
+	0xf06: 0x4000, 0xf07: 0x4000, 0xf08: 0x4000, 0xf09: 0x4000, 0xf0a: 0x4000, 0xf0b: 0x4000,
+	0xf0c: 0x4000, 0xf0d: 0x4000, 0xf0e: 0x4000, 0xf0f: 0x4000, 0xf10: 0x4000, 0xf11: 0x4000,
+	0xf12: 0x4000, 0xf13: 0x4000, 0xf14: 0x4000, 0xf15: 0x4000, 0xf16: 0x4000, 0xf17: 0x4000,
+	0xf18: 0x4000, 0xf19: 0x4000, 0xf1a: 0x4000, 0xf1b: 0x4000, 0xf1c: 0x4000, 0xf1d: 0x4000,
+	0xf1e: 0x4000, 0xf1f: 0x4000, 0xf20: 0x4000, 0xf21: 0x4000, 0xf22: 0x4000, 0xf23: 0x4000,
+	0xf24: 0x4000, 0xf25: 0x4000, 0xf26: 0x4000, 0xf27: 0x4000, 0xf28: 0x4000, 0xf29: 0x4000,
+	0xf2a: 0x4000, 0xf2b: 0x4000, 0xf2c: 0x4000, 0xf2d: 0x4000, 0xf2e: 0x4000, 0xf2f: 0x4000,
+	0xf30: 0x4000, 0xf31: 0x4000, 0xf32: 0x4000, 0xf33: 0x4000, 0xf34: 0x4000, 0xf35: 0x4000,
+	0xf36: 0x4000, 0xf37: 0x4000, 0xf38: 0x4000, 0xf39: 0x4000, 0xf3a: 0x4000, 0xf3b: 0x4000,
+	0xf3c: 0x4000, 0xf3d: 0x4000, 0xf3e: 0x4000,
+	// Block 0x3d, offset 0xf40
+	0xf40: 0x4000, 0xf41: 0x4000, 0xf42: 0x4000, 0xf43: 0x4000, 0xf44: 0x4000, 0xf45: 0x4000,
+	0xf46: 0x4000, 0xf47: 0x4000, 0xf48: 0x4000, 0xf49: 0x4000, 0xf4a: 0x4000, 0xf4b: 0x4000,
+	0xf4c: 0x4000, 0xf50: 0x4000, 0xf51: 0x4000,
+	0xf52: 0x4000, 0xf53: 0x4000, 0xf54: 0x4000, 0xf55: 0x4000, 0xf56: 0x4000, 0xf57: 0x4000,
+	0xf58: 0x4000, 0xf59: 0x4000, 0xf5a: 0x4000, 0xf5b: 0x4000, 0xf5c: 0x4000, 0xf5d: 0x4000,
+	0xf5e: 0x4000, 0xf5f: 0x4000, 0xf60: 0x4000, 0xf61: 0x4000, 0xf62: 0x4000, 0xf63: 0x4000,
+	0xf64: 0x4000, 0xf65: 0x4000, 0xf66: 0x4000, 0xf67: 0x4000, 0xf68: 0x4000, 0xf69: 0x4000,
+	0xf6a: 0x4000, 0xf6b: 0x4000, 0xf6c: 0x4000, 0xf6d: 0x4000, 0xf6e: 0x4000, 0xf6f: 0x4000,
+	0xf70: 0x4000, 0xf71: 0x4000, 0xf72: 0x4000, 0xf73: 0x4000, 0xf74: 0x4000, 0xf75: 0x4000,
+	0xf76: 0x4000, 0xf77: 0x4000, 0xf78: 0x4000, 0xf79: 0x4000, 0xf7a: 0x4000, 0xf7b: 0x4000,
+	0xf7c: 0x4000, 0xf7d: 0x4000, 0xf7e: 0x4000, 0xf7f: 0x4000,
+	// Block 0x3e, offset 0xf80
+	0xf80: 0x4000, 0xf81: 0x4000, 0xf82: 0x4000, 0xf83: 0x4000, 0xf84: 0x4000, 0xf85: 0x4000,
+	0xf86: 0x4000,
+	// Block 0x3f, offset 0xfc0
+	0xfe0: 0x4000, 0xfe1: 0x4000, 0xfe2: 0x4000, 0xfe3: 0x4000,
+	0xfe4: 0x4000, 0xfe5: 0x4000, 0xfe6: 0x4000, 0xfe7: 0x4000, 0xfe8: 0x4000, 0xfe9: 0x4000,
+	0xfea: 0x4000, 0xfeb: 0x4000, 0xfec: 0x4000, 0xfed: 0x4000, 0xfee: 0x4000, 0xfef: 0x4000,
+	0xff0: 0x4000, 0xff1: 0x4000, 0xff2: 0x4000, 0xff3: 0x4000, 0xff4: 0x4000, 0xff5: 0x4000,
+	0xff6: 0x4000, 0xff7: 0x4000, 0xff8: 0x4000, 0xff9: 0x4000, 0xffa: 0x4000, 0xffb: 0x4000,
+	0xffc: 0x4000,
+	// Block 0x40, offset 0x1000
+	0x1000: 0x4000, 0x1001: 0x4000, 0x1002: 0x4000, 0x1003: 0x4000, 0x1004: 0x4000, 0x1005: 0x4000,
+	0x1006: 0x4000, 0x1007: 0x4000, 0x1008: 0x4000, 0x1009: 0x4000, 0x100a: 0x4000, 0x100b: 0x4000,
+	0x100c: 0x4000, 0x100d: 0x4000, 0x100e: 0x4000, 0x100f: 0x4000, 0x1010: 0x4000, 0x1011: 0x4000,
+	0x1012: 0x4000, 0x1013: 0x4000, 0x1014: 0x4000, 0x1015: 0x4000, 0x1016: 0x4000, 0x1017: 0x4000,
+	0x1018: 0x4000, 0x1019: 0x4000, 0x101a: 0x4000, 0x101b: 0x4000, 0x101c: 0x4000, 0x101d: 0x4000,
+	0x101e: 0x4000, 0x101f: 0x4000, 0x1020: 0x4000, 0x1021: 0x4000, 0x1022: 0x4000, 0x1023: 0x4000,
+	// Block 0x41, offset 0x1040
+	0x1040: 0x2000, 0x1041: 0x2000, 0x1042: 0x2000, 0x1043: 0x2000, 0x1044: 0x2000, 0x1045: 0x2000,
+	0x1046: 0x2000, 0x1047: 0x2000, 0x1048: 0x2000, 0x1049: 0x2000, 0x104a: 0x2000, 0x104b: 0x2000,
+	0x104c: 0x2000, 0x104d: 0x2000, 0x104e: 0x2000, 0x104f: 0x2000, 0x1050: 0x4000, 0x1051: 0x4000,
+	0x1052: 0x4000, 0x1053: 0x4000, 0x1054: 0x4000, 0x1055: 0x4000, 0x1056: 0x4000, 0x1057: 0x4000,
+	0x1058: 0x4000, 0x1059: 0x4000,
+	0x1070: 0x4000, 0x1071: 0x4000, 0x1072: 0x4000, 0x1073: 0x4000, 0x1074: 0x4000, 0x1075: 0x4000,
+	0x1076: 0x4000, 0x1077: 0x4000, 0x1078: 0x4000, 0x1079: 0x4000, 0x107a: 0x4000, 0x107b: 0x4000,
+	0x107c: 0x4000, 0x107d: 0x4000, 0x107e: 0x4000, 0x107f: 0x4000,
+	// Block 0x42, offset 0x1080
+	0x1080: 0x4000, 0x1081: 0x4000, 0x1082: 0x4000, 0x1083: 0x4000, 0x1084: 0x4000, 0x1085: 0x4000,
+	0x1086: 0x4000, 0x1087: 0x4000, 0x1088: 0x4000, 0x1089: 0x4000, 0x108a: 0x4000, 0x108b: 0x4000,
+	0x108c: 0x4000, 0x108d: 0x4000, 0x108e: 0x4000, 0x108f: 0x4000, 0x1090: 0x4000, 0x1091: 0x4000,
+	0x1092: 0x4000, 0x1094: 0x4000, 0x1095: 0x4000, 0x1096: 0x4000, 0x1097: 0x4000,
+	0x1098: 0x4000, 0x1099: 0x4000, 0x109a: 0x4000, 0x109b: 0x4000, 0x109c: 0x4000, 0x109d: 0x4000,
+	0x109e: 0x4000, 0x109f: 0x4000, 0x10a0: 0x4000, 0x10a1: 0x4000, 0x10a2: 0x4000, 0x10a3: 0x4000,
+	0x10a4: 0x4000, 0x10a5: 0x4000, 0x10a6: 0x4000, 0x10a8: 0x4000, 0x10a9: 0x4000,
+	0x10aa: 0x4000, 0x10ab: 0x4000,
+	// Block 0x43, offset 0x10c0
+	0x10c1: 0x9012, 0x10c2: 0x9012, 0x10c3: 0x9012, 0x10c4: 0x9012, 0x10c5: 0x9012,
+	0x10c6: 0x9012, 0x10c7: 0x9012, 0x10c8: 0x9012, 0x10c9: 0x9012, 0x10ca: 0x9012, 0x10cb: 0x9012,
+	0x10cc: 0x9012, 0x10cd: 0x9012, 0x10ce: 0x9012, 0x10cf: 0x9012, 0x10d0: 0x9012, 0x10d1: 0x9012,
+	0x10d2: 0x9012, 0x10d3: 0x9012, 0x10d4: 0x9012, 0x10d5: 0x9012, 0x10d6: 0x9012, 0x10d7: 0x9012,
+	0x10d8: 0x9012, 0x10d9: 0x9012, 0x10da: 0x9012, 0x10db: 0x9012, 0x10dc: 0x9012, 0x10dd: 0x9012,
+	0x10de: 0x9012, 0x10df: 0x9012, 0x10e0: 0x9049, 0x10e1: 0x9049, 0x10e2: 0x9049, 0x10e3: 0x9049,
+	0x10e4: 0x9049, 0x10e5: 0x9049, 0x10e6: 0x9049, 0x10e7: 0x9049, 0x10e8: 0x9049, 0x10e9: 0x9049,
+	0x10ea: 0x9049, 0x10eb: 0x9049, 0x10ec: 0x9049, 0x10ed: 0x9049, 0x10ee: 0x9049, 0x10ef: 0x9049,
+	0x10f0: 0x9049, 0x10f1: 0x9049, 0x10f2: 0x9049, 0x10f3: 0x9049, 0x10f4: 0x9049, 0x10f5: 0x9049,
+	0x10f6: 0x9049, 0x10f7: 0x9049, 0x10f8: 0x9049, 0x10f9: 0x9049, 0x10fa: 0x9049, 0x10fb: 0x9049,
+	0x10fc: 0x9049, 0x10fd: 0x9049, 0x10fe: 0x9049, 0x10ff: 0x9049,
+	// Block 0x44, offset 0x1100
+	0x1100: 0x9049, 0x1101: 0x9049, 0x1102: 0x9049, 0x1103: 0x9049, 0x1104: 0x9049, 0x1105: 0x9049,
+	0x1106: 0x9049, 0x1107: 0x9049, 0x1108: 0x9049, 0x1109: 0x9049, 0x110a: 0x9049, 0x110b: 0x9049,
+	0x110c: 0x9049, 0x110d: 0x9049, 0x110e: 0x9049, 0x110f: 0x9049, 0x1110: 0x9049, 0x1111: 0x9049,
+	0x1112: 0x9049, 0x1113: 0x9049, 0x1114: 0x9049, 0x1115: 0x9049, 0x1116: 0x9049, 0x1117: 0x9049,
+	0x1118: 0x9049, 0x1119: 0x9049, 0x111a: 0x9049, 0x111b: 0x9049, 0x111c: 0x9049, 0x111d: 0x9049,
+	0x111e: 0x9049, 0x111f: 0x904a, 0x1120: 0x904b, 0x1121: 0xb04c, 0x1122: 0xb04d, 0x1123: 0xb04d,
+	0x1124: 0xb04e, 0x1125: 0xb04f, 0x1126: 0xb050, 0x1127: 0xb051, 0x1128: 0xb052, 0x1129: 0xb053,
+	0x112a: 0xb054, 0x112b: 0xb055, 0x112c: 0xb056, 0x112d: 0xb057, 0x112e: 0xb058, 0x112f: 0xb059,
+	0x1130: 0xb05a, 0x1131: 0xb05b, 0x1132: 0xb05c, 0x1133: 0xb05d, 0x1134: 0xb05e, 0x1135: 0xb05f,
+	0x1136: 0xb060, 0x1137: 0xb061, 0x1138: 0xb062, 0x1139: 0xb063, 0x113a: 0xb064, 0x113b: 0xb065,
+	0x113c: 0xb052, 0x113d: 0xb066, 0x113e: 0xb067, 0x113f: 0xb055,
+	// Block 0x45, offset 0x1140
+	0x1140: 0xb068, 0x1141: 0xb069, 0x1142: 0xb06a, 0x1143: 0xb06b, 0x1144: 0xb05a, 0x1145: 0xb056,
+	0x1146: 0xb06c, 0x1147: 0xb06d, 0x1148: 0xb06b, 0x1149: 0xb06e, 0x114a: 0xb06b, 0x114b: 0xb06f,
+	0x114c: 0xb06f, 0x114d: 0xb070, 0x114e: 0xb070, 0x114f: 0xb071, 0x1150: 0xb056, 0x1151: 0xb072,
+	0x1152: 0xb073, 0x1153: 0xb072, 0x1154: 0xb074, 0x1155: 0xb073, 0x1156: 0xb075, 0x1157: 0xb075,
+	0x1158: 0xb076, 0x1159: 0xb076, 0x115a: 0xb077, 0x115b: 0xb077, 0x115c: 0xb073, 0x115d: 0xb078,
+	0x115e: 0xb079, 0x115f: 0xb067, 0x1160: 0xb07a, 0x1161: 0xb07b, 0x1162: 0xb07b, 0x1163: 0xb07b,
+	0x1164: 0xb07b, 0x1165: 0xb07b, 0x1166: 0xb07b, 0x1167: 0xb07b, 0x1168: 0xb07b, 0x1169: 0xb07b,
+	0x116a: 0xb07b, 0x116b: 0xb07b, 0x116c: 0xb07b, 0x116d: 0xb07b, 0x116e: 0xb07b, 0x116f: 0xb07b,
+	0x1170: 0xb07c, 0x1171: 0xb07c, 0x1172: 0xb07c, 0x1173: 0xb07c, 0x1174: 0xb07c, 0x1175: 0xb07c,
+	0x1176: 0xb07c, 0x1177: 0xb07c, 0x1178: 0xb07c, 0x1179: 0xb07c, 0x117a: 0xb07c, 0x117b: 0xb07c,
+	0x117c: 0xb07c, 0x117d: 0xb07c, 0x117e: 0xb07c,
+	// Block 0x46, offset 0x1180
+	0x1182: 0xb07d, 0x1183: 0xb07e, 0x1184: 0xb07f, 0x1185: 0xb080,
+	0x1186: 0xb07f, 0x1187: 0xb07e, 0x118a: 0xb081, 0x118b: 0xb082,
+	0x118c: 0xb083, 0x118d: 0xb07f, 0x118e: 0xb080, 0x118f: 0xb07f,
+	0x1192: 0xb084, 0x1193: 0xb085, 0x1194: 0xb084, 0x1195: 0xb086, 0x1196: 0xb084, 0x1197: 0xb087,
+	0x119a: 0xb088, 0x119b: 0xb089, 0x119c: 0xb08a,
+	0x11a0: 0x908b, 0x11a1: 0x908b, 0x11a2: 0x908c, 0x11a3: 0x908d,
+	0x11a4: 0x908b, 0x11a5: 0x908e, 0x11a6: 0x908f, 0x11a8: 0xb090, 0x11a9: 0xb091,
+	0x11aa: 0xb092, 0x11ab: 0xb091, 0x11ac: 0xb093, 0x11ad: 0xb094, 0x11ae: 0xb095,
+	0x11bd: 0x2000,
+	// Block 0x47, offset 0x11c0
+	0x11e0: 0x4000, 0x11e1: 0x4000,
+	// Block 0x48, offset 0x1200
+	0x1200: 0x4000, 0x1201: 0x4000, 0x1202: 0x4000, 0x1203: 0x4000, 0x1204: 0x4000, 0x1205: 0x4000,
+	0x1206: 0x4000, 0x1207: 0x4000, 0x1208: 0x4000, 0x1209: 0x4000, 0x120a: 0x4000, 0x120b: 0x4000,
+	0x120c: 0x4000, 0x120d: 0x4000, 0x120e: 0x4000, 0x120f: 0x4000, 0x1210: 0x4000, 0x1211: 0x4000,
+	0x1212: 0x4000, 0x1213: 0x4000, 0x1214: 0x4000, 0x1215: 0x4000, 0x1216: 0x4000, 0x1217: 0x4000,
+	0x1218: 0x4000, 0x1219: 0x4000, 0x121a: 0x4000, 0x121b: 0x4000, 0x121c: 0x4000, 0x121d: 0x4000,
+	0x121e: 0x4000, 0x121f: 0x4000, 0x1220: 0x4000, 0x1221: 0x4000, 0x1222: 0x4000, 0x1223: 0x4000,
+	0x1224: 0x4000, 0x1225: 0x4000, 0x1226: 0x4000, 0x1227: 0x4000, 0x1228: 0x4000, 0x1229: 0x4000,
+	0x122a: 0x4000, 0x122b: 0x4000, 0x122c: 0x4000,
+	// Block 0x49, offset 0x1240
+	0x1240: 0x4000, 0x1241: 0x4000, 0x1242: 0x4000, 0x1243: 0x4000, 0x1244: 0x4000, 0x1245: 0x4000,
+	0x1246: 0x4000, 0x1247: 0x4000, 0x1248: 0x4000, 0x1249: 0x4000, 0x124a: 0x4000, 0x124b: 0x4000,
+	0x124c: 0x4000, 0x124d: 0x4000, 0x124e: 0x4000, 0x124f: 0x4000, 0x1250: 0x4000, 0x1251: 0x4000,
+	0x1252: 0x4000, 0x1253: 0x4000, 0x1254: 0x4000, 0x1255: 0x4000, 0x1256: 0x4000, 0x1257: 0x4000,
+	0x1258: 0x4000, 0x1259: 0x4000, 0x125a: 0x4000, 0x125b: 0x4000, 0x125c: 0x4000, 0x125d: 0x4000,
+	0x125e: 0x4000, 0x125f: 0x4000, 0x1260: 0x4000, 0x1261: 0x4000, 0x1262: 0x4000, 0x1263: 0x4000,
+	0x1264: 0x4000, 0x1265: 0x4000, 0x1266: 0x4000, 0x1267: 0x4000, 0x1268: 0x4000, 0x1269: 0x4000,
+	0x126a: 0x4000, 0x126b: 0x4000, 0x126c: 0x4000, 0x126d: 0x4000, 0x126e: 0x4000, 0x126f: 0x4000,
+	0x1270: 0x4000, 0x1271: 0x4000, 0x1272: 0x4000,
+	// Block 0x4a, offset 0x1280
+	0x1280: 0x4000, 0x1281: 0x4000, 0x1282: 0x4000, 0x1283: 0x4000, 0x1284: 0x4000, 0x1285: 0x4000,
+	0x1286: 0x4000, 0x1287: 0x4000, 0x1288: 0x4000, 0x1289: 0x4000, 0x128a: 0x4000, 0x128b: 0x4000,
+	0x128c: 0x4000, 0x128d: 0x4000, 0x128e: 0x4000, 0x128f: 0x4000, 0x1290: 0x4000, 0x1291: 0x4000,
+	0x1292: 0x4000, 0x1293: 0x4000, 0x1294: 0x4000, 0x1295: 0x4000, 0x1296: 0x4000, 0x1297: 0x4000,
+	0x1298: 0x4000, 0x1299: 0x4000, 0x129a: 0x4000, 0x129b: 0x4000, 0x129c: 0x4000, 0x129d: 0x4000,
+	0x129e: 0x4000,
+	// Block 0x4b, offset 0x12c0
+	0x12f0: 0x4000, 0x12f1: 0x4000, 0x12f2: 0x4000, 0x12f3: 0x4000, 0x12f4: 0x4000, 0x12f5: 0x4000,
+	0x12f6: 0x4000, 0x12f7: 0x4000, 0x12f8: 0x4000, 0x12f9: 0x4000, 0x12fa: 0x4000, 0x12fb: 0x4000,
+	0x12fc: 0x4000, 0x12fd: 0x4000, 0x12fe: 0x4000, 0x12ff: 0x4000,
+	// Block 0x4c, offset 0x1300
+	0x1300: 0x4000, 0x1301: 0x4000, 0x1302: 0x4000, 0x1303: 0x4000, 0x1304: 0x4000, 0x1305: 0x4000,
+	0x1306: 0x4000, 0x1307: 0x4000, 0x1308: 0x4000, 0x1309: 0x4000, 0x130a: 0x4000, 0x130b: 0x4000,
+	0x130c: 0x4000, 0x130d: 0x4000, 0x130e: 0x4000, 0x130f: 0x4000, 0x1310: 0x4000, 0x1311: 0x4000,
+	0x1312: 0x4000, 0x1313: 0x4000, 0x1314: 0x4000, 0x1315: 0x4000, 0x1316: 0x4000, 0x1317: 0x4000,
+	0x1318: 0x4000, 0x1319: 0x4000, 0x131a: 0x4000, 0x131b: 0x4000, 0x131c: 0x4000, 0x131d: 0x4000,
+	0x131e: 0x4000, 0x131f: 0x4000, 0x1320: 0x4000, 0x1321: 0x4000, 0x1322: 0x4000, 0x1323: 0x4000,
+	0x1324: 0x4000, 0x1325: 0x4000, 0x1326: 0x4000, 0x1327: 0x4000, 0x1328: 0x4000, 0x1329: 0x4000,
+	0x132a: 0x4000, 0x132b: 0x4000, 0x132c: 0x4000, 0x132d: 0x4000, 0x132e: 0x4000, 0x132f: 0x4000,
+	0x1330: 0x4000, 0x1331: 0x4000, 0x1332: 0x4000, 0x1333: 0x4000, 0x1334: 0x4000, 0x1335: 0x4000,
+	0x1336: 0x4000, 0x1337: 0x4000, 0x1338: 0x4000, 0x1339: 0x4000, 0x133a: 0x4000, 0x133b: 0x4000,
+	// Block 0x4d, offset 0x1340
+	0x1344: 0x4000,
+	// Block 0x4e, offset 0x1380
+	0x138f: 0x4000,
+	// Block 0x4f, offset 0x13c0
+	0x13c0: 0x2000, 0x13c1: 0x2000, 0x13c2: 0x2000, 0x13c3: 0x2000, 0x13c4: 0x2000, 0x13c5: 0x2000,
+	0x13c6: 0x2000, 0x13c7: 0x2000, 0x13c8: 0x2000, 0x13c9: 0x2000, 0x13ca: 0x2000,
+	0x13d0: 0x2000, 0x13d1: 0x2000,
+	0x13d2: 0x2000, 0x13d3: 0x2000, 0x13d4: 0x2000, 0x13d5: 0x2000, 0x13d6: 0x2000, 0x13d7: 0x2000,
+	0x13d8: 0x2000, 0x13d9: 0x2000, 0x13da: 0x2000, 0x13db: 0x2000, 0x13dc: 0x2000, 0x13dd: 0x2000,
+	0x13de: 0x2000, 0x13df: 0x2000, 0x13e0: 0x2000, 0x13e1: 0x2000, 0x13e2: 0x2000, 0x13e3: 0x2000,
+	0x13e4: 0x2000, 0x13e5: 0x2000, 0x13e6: 0x2000, 0x13e7: 0x2000, 0x13e8: 0x2000, 0x13e9: 0x2000,
+	0x13ea: 0x2000, 0x13eb: 0x2000, 0x13ec: 0x2000, 0x13ed: 0x2000,
+	0x13f0: 0x2000, 0x13f1: 0x2000, 0x13f2: 0x2000, 0x13f3: 0x2000, 0x13f4: 0x2000, 0x13f5: 0x2000,
+	0x13f6: 0x2000, 0x13f7: 0x2000, 0x13f8: 0x2000, 0x13f9: 0x2000, 0x13fa: 0x2000, 0x13fb: 0x2000,
+	0x13fc: 0x2000, 0x13fd: 0x2000, 0x13fe: 0x2000, 0x13ff: 0x2000,
+	// Block 0x50, offset 0x1400
+	0x1400: 0x2000, 0x1401: 0x2000, 0x1402: 0x2000, 0x1403: 0x2000, 0x1404: 0x2000, 0x1405: 0x2000,
+	0x1406: 0x2000, 0x1407: 0x2000, 0x1408: 0x2000, 0x1409: 0x2000, 0x140a: 0x2000, 0x140b: 0x2000,
+	0x140c: 0x2000, 0x140d: 0x2000, 0x140e: 0x2000, 0x140f: 0x2000, 0x1410: 0x2000, 0x1411: 0x2000,
+	0x1412: 0x2000, 0x1413: 0x2000, 0x1414: 0x2000, 0x1415: 0x2000, 0x1416: 0x2000, 0x1417: 0x2000,
+	0x1418: 0x2000, 0x1419: 0x2000, 0x141a: 0x2000, 0x141b: 0x2000, 0x141c: 0x2000, 0x141d: 0x2000,
+	0x141e: 0x2000, 0x141f: 0x2000, 0x1420: 0x2000, 0x1421: 0x2000, 0x1422: 0x2000, 0x1423: 0x2000,
+	0x1424: 0x2000, 0x1425: 0x2000, 0x1426: 0x2000, 0x1427: 0x2000, 0x1428: 0x2000, 0x1429: 0x2000,
+	0x1430: 0x2000, 0x1431: 0x2000, 0x1432: 0x2000, 0x1433: 0x2000, 0x1434: 0x2000, 0x1435: 0x2000,
+	0x1436: 0x2000, 0x1437: 0x2000, 0x1438: 0x2000, 0x1439: 0x2000, 0x143a: 0x2000, 0x143b: 0x2000,
+	0x143c: 0x2000, 0x143d: 0x2000, 0x143e: 0x2000, 0x143f: 0x2000,
+	// Block 0x51, offset 0x1440
+	0x1440: 0x2000, 0x1441: 0x2000, 0x1442: 0x2000, 0x1443: 0x2000, 0x1444: 0x2000, 0x1445: 0x2000,
+	0x1446: 0x2000, 0x1447: 0x2000, 0x1448: 0x2000, 0x1449: 0x2000, 0x144a: 0x2000, 0x144b: 0x2000,
+	0x144c: 0x2000, 0x144d: 0x2000, 0x144e: 0x4000, 0x144f: 0x2000, 0x1450: 0x2000, 0x1451: 0x4000,
+	0x1452: 0x4000, 0x1453: 0x4000, 0x1454: 0x4000, 0x1455: 0x4000, 0x1456: 0x4000, 0x1457: 0x4000,
+	0x1458: 0x4000, 0x1459: 0x4000, 0x145a: 0x4000, 0x145b: 0x2000, 0x145c: 0x2000, 0x145d: 0x2000,
+	0x145e: 0x2000, 0x145f: 0x2000, 0x1460: 0x2000, 0x1461: 0x2000, 0x1462: 0x2000, 0x1463: 0x2000,
+	0x1464: 0x2000, 0x1465: 0x2000, 0x1466: 0x2000, 0x1467: 0x2000, 0x1468: 0x2000, 0x1469: 0x2000,
+	0x146a: 0x2000, 0x146b: 0x2000, 0x146c: 0x2000,
+	// Block 0x52, offset 0x1480
+	0x1480: 0x4000, 0x1481: 0x4000, 0x1482: 0x4000,
+	0x1490: 0x4000, 0x1491: 0x4000,
+	0x1492: 0x4000, 0x1493: 0x4000, 0x1494: 0x4000, 0x1495: 0x4000, 0x1496: 0x4000, 0x1497: 0x4000,
+	0x1498: 0x4000, 0x1499: 0x4000, 0x149a: 0x4000, 0x149b: 0x4000, 0x149c: 0x4000, 0x149d: 0x4000,
+	0x149e: 0x4000, 0x149f: 0x4000, 0x14a0: 0x4000, 0x14a1: 0x4000, 0x14a2: 0x4000, 0x14a3: 0x4000,
+	0x14a4: 0x4000, 0x14a5: 0x4000, 0x14a6: 0x4000, 0x14a7: 0x4000, 0x14a8: 0x4000, 0x14a9: 0x4000,
+	0x14aa: 0x4000, 0x14ab: 0x4000, 0x14ac: 0x4000, 0x14ad: 0x4000, 0x14ae: 0x4000, 0x14af: 0x4000,
+	0x14b0: 0x4000, 0x14b1: 0x4000, 0x14b2: 0x4000, 0x14b3: 0x4000, 0x14b4: 0x4000, 0x14b5: 0x4000,
+	0x14b6: 0x4000, 0x14b7: 0x4000, 0x14b8: 0x4000, 0x14b9: 0x4000, 0x14ba: 0x4000, 0x14bb: 0x4000,
+	// Block 0x53, offset 0x14c0
+	0x14c0: 0x4000, 0x14c1: 0x4000, 0x14c2: 0x4000, 0x14c3: 0x4000, 0x14c4: 0x4000, 0x14c5: 0x4000,
+	0x14c6: 0x4000, 0x14c7: 0x4000, 0x14c8: 0x4000,
+	0x14d0: 0x4000, 0x14d1: 0x4000,
+	0x14e0: 0x4000, 0x14e1: 0x4000, 0x14e2: 0x4000, 0x14e3: 0x4000,
+	0x14e4: 0x4000, 0x14e5: 0x4000,
+	// Block 0x54, offset 0x1500
+	0x1500: 0x4000, 0x1501: 0x4000, 0x1502: 0x4000, 0x1503: 0x4000, 0x1504: 0x4000, 0x1505: 0x4000,
+	0x1506: 0x4000, 0x1507: 0x4000, 0x1508: 0x4000, 0x1509: 0x4000, 0x150a: 0x4000, 0x150b: 0x4000,
+	0x150c: 0x4000, 0x150d: 0x4000, 0x150e: 0x4000, 0x150f: 0x4000, 0x1510: 0x4000, 0x1511: 0x4000,
+	0x1512: 0x4000, 0x1513: 0x4000, 0x1514: 0x4000, 0x1515: 0x4000, 0x1516: 0x4000, 0x1517: 0x4000,
+	0x1518: 0x4000, 0x1519: 0x4000, 0x151a: 0x4000, 0x151b: 0x4000, 0x151c: 0x4000, 0x151d: 0x4000,
+	0x151e: 0x4000, 0x151f: 0x4000, 0x1520: 0x4000,
+	0x152d: 0x4000, 0x152e: 0x4000, 0x152f: 0x4000,
+	0x1530: 0x4000, 0x1531: 0x4000, 0x1532: 0x4000, 0x1533: 0x4000, 0x1534: 0x4000, 0x1535: 0x4000,
+	0x1537: 0x4000, 0x1538: 0x4000, 0x1539: 0x4000, 0x153a: 0x4000, 0x153b: 0x4000,
+	0x153c: 0x4000, 0x153d: 0x4000, 0x153e: 0x4000, 0x153f: 0x4000,
+	// Block 0x55, offset 0x1540
+	0x1540: 0x4000, 0x1541: 0x4000, 0x1542: 0x4000, 0x1543: 0x4000, 0x1544: 0x4000, 0x1545: 0x4000,
+	0x1546: 0x4000, 0x1547: 0x4000, 0x1548: 0x4000, 0x1549: 0x4000, 0x154a: 0x4000, 0x154b: 0x4000,
+	0x154c: 0x4000, 0x154d: 0x4000, 0x154e: 0x4000, 0x154f: 0x4000, 0x1550: 0x4000, 0x1551: 0x4000,
+	0x1552: 0x4000, 0x1553: 0x4000, 0x1554: 0x4000, 0x1555: 0x4000, 0x1556: 0x4000, 0x1557: 0x4000,
+	0x1558: 0x4000, 0x1559: 0x4000, 0x155a: 0x4000, 0x155b: 0x4000, 0x155c: 0x4000, 0x155d: 0x4000,
+	0x155e: 0x4000, 0x155f: 0x4000, 0x1560: 0x4000, 0x1561: 0x4000, 0x1562: 0x4000, 0x1563: 0x4000,
+	0x1564: 0x4000, 0x1565: 0x4000, 0x1566: 0x4000, 0x1567: 0x4000, 0x1568: 0x4000, 0x1569: 0x4000,
+	0x156a: 0x4000, 0x156b: 0x4000, 0x156c: 0x4000, 0x156d: 0x4000, 0x156e: 0x4000, 0x156f: 0x4000,
+	0x1570: 0x4000, 0x1571: 0x4000, 0x1572: 0x4000, 0x1573: 0x4000, 0x1574: 0x4000, 0x1575: 0x4000,
+	0x1576: 0x4000, 0x1577: 0x4000, 0x1578: 0x4000, 0x1579: 0x4000, 0x157a: 0x4000, 0x157b: 0x4000,
+	0x157c: 0x4000, 0x157e: 0x4000, 0x157f: 0x4000,
+	// Block 0x56, offset 0x1580
+	0x1580: 0x4000, 0x1581: 0x4000, 0x1582: 0x4000, 0x1583: 0x4000, 0x1584: 0x4000, 0x1585: 0x4000,
+	0x1586: 0x4000, 0x1587: 0x4000, 0x1588: 0x4000, 0x1589: 0x4000, 0x158a: 0x4000, 0x158b: 0x4000,
+	0x158c: 0x4000, 0x158d: 0x4000, 0x158e: 0x4000, 0x158f: 0x4000, 0x1590: 0x4000, 0x1591: 0x4000,
+	0x1592: 0x4000, 0x1593: 0x4000,
+	0x15a0: 0x4000, 0x15a1: 0x4000, 0x15a2: 0x4000, 0x15a3: 0x4000,
+	0x15a4: 0x4000, 0x15a5: 0x4000, 0x15a6: 0x4000, 0x15a7: 0x4000, 0x15a8: 0x4000, 0x15a9: 0x4000,
+	0x15aa: 0x4000, 0x15ab: 0x4000, 0x15ac: 0x4000, 0x15ad: 0x4000, 0x15ae: 0x4000, 0x15af: 0x4000,
+	0x15b0: 0x4000, 0x15b1: 0x4000, 0x15b2: 0x4000, 0x15b3: 0x4000, 0x15b4: 0x4000, 0x15b5: 0x4000,
+	0x15b6: 0x4000, 0x15b7: 0x4000, 0x15b8: 0x4000, 0x15b9: 0x4000, 0x15ba: 0x4000, 0x15bb: 0x4000,
+	0x15bc: 0x4000, 0x15bd: 0x4000, 0x15be: 0x4000, 0x15bf: 0x4000,
+	// Block 0x57, offset 0x15c0
+	0x15c0: 0x4000, 0x15c1: 0x4000, 0x15c2: 0x4000, 0x15c3: 0x4000, 0x15c4: 0x4000, 0x15c5: 0x4000,
+	0x15c6: 0x4000, 0x15c7: 0x4000, 0x15c8: 0x4000, 0x15c9: 0x4000, 0x15ca: 0x4000,
+	0x15cf: 0x4000, 0x15d0: 0x4000, 0x15d1: 0x4000,
+	0x15d2: 0x4000, 0x15d3: 0x4000,
+	0x15e0: 0x4000, 0x15e1: 0x4000, 0x15e2: 0x4000, 0x15e3: 0x4000,
+	0x15e4: 0x4000, 0x15e5: 0x4000, 0x15e6: 0x4000, 0x15e7: 0x4000, 0x15e8: 0x4000, 0x15e9: 0x4000,
+	0x15ea: 0x4000, 0x15eb: 0x4000, 0x15ec: 0x4000, 0x15ed: 0x4000, 0x15ee: 0x4000, 0x15ef: 0x4000,
+	0x15f0: 0x4000, 0x15f4: 0x4000,
+	0x15f8: 0x4000, 0x15f9: 0x4000, 0x15fa: 0x4000, 0x15fb: 0x4000,
+	0x15fc: 0x4000, 0x15fd: 0x4000, 0x15fe: 0x4000, 0x15ff: 0x4000,
+	// Block 0x58, offset 0x1600
+	0x1600: 0x4000, 0x1602: 0x4000, 0x1603: 0x4000, 0x1604: 0x4000, 0x1605: 0x4000,
+	0x1606: 0x4000, 0x1607: 0x4000, 0x1608: 0x4000, 0x1609: 0x4000, 0x160a: 0x4000, 0x160b: 0x4000,
+	0x160c: 0x4000, 0x160d: 0x4000, 0x160e: 0x4000, 0x160f: 0x4000, 0x1610: 0x4000, 0x1611: 0x4000,
+	0x1612: 0x4000, 0x1613: 0x4000, 0x1614: 0x4000, 0x1615: 0x4000, 0x1616: 0x4000, 0x1617: 0x4000,
+	0x1618: 0x4000, 0x1619: 0x4000, 0x161a: 0x4000, 0x161b: 0x4000, 0x161c: 0x4000, 0x161d: 0x4000,
+	0x161e: 0x4000, 0x161f: 0x4000, 0x1620: 0x4000, 0x1621: 0x4000, 0x1622: 0x4000, 0x1623: 0x4000,
+	0x1624: 0x4000, 0x1625: 0x4000, 0x1626: 0x4000, 0x1627: 0x4000, 0x1628: 0x4000, 0x1629: 0x4000,
+	0x162a: 0x4000, 0x162b: 0x4000, 0x162c: 0x4000, 0x162d: 0x4000, 0x162e: 0x4000, 0x162f: 0x4000,
+	0x1630: 0x4000, 0x1631: 0x4000, 0x1632: 0x4000, 0x1633: 0x4000, 0x1634: 0x4000, 0x1635: 0x4000,
+	0x1636: 0x4000, 0x1637: 0x4000, 0x1638: 0x4000, 0x1639: 0x4000, 0x163a: 0x4000, 0x163b: 0x4000,
+	0x163c: 0x4000, 0x163d: 0x4000, 0x163e: 0x4000, 0x163f: 0x4000,
+	// Block 0x59, offset 0x1640
+	0x1640: 0x4000, 0x1641: 0x4000, 0x1642: 0x4000, 0x1643: 0x4000, 0x1644: 0x4000, 0x1645: 0x4000,
+	0x1646: 0x4000, 0x1647: 0x4000, 0x1648: 0x4000, 0x1649: 0x4000, 0x164a: 0x4000, 0x164b: 0x4000,
+	0x164c: 0x4000, 0x164d: 0x4000, 0x164e: 0x4000, 0x164f: 0x4000, 0x1650: 0x4000, 0x1651: 0x4000,
+	0x1652: 0x4000, 0x1653: 0x4000, 0x1654: 0x4000, 0x1655: 0x4000, 0x1656: 0x4000, 0x1657: 0x4000,
+	0x1658: 0x4000, 0x1659: 0x4000, 0x165a: 0x4000, 0x165b: 0x4000, 0x165c: 0x4000, 0x165d: 0x4000,
+	0x165e: 0x4000, 0x165f: 0x4000, 0x1660: 0x4000, 0x1661: 0x4000, 0x1662: 0x4000, 0x1663: 0x4000,
+	0x1664: 0x4000, 0x1665: 0x4000, 0x1666: 0x4000, 0x1667: 0x4000, 0x1668: 0x4000, 0x1669: 0x4000,
+	0x166a: 0x4000, 0x166b: 0x4000, 0x166c: 0x4000, 0x166d: 0x4000, 0x166e: 0x4000, 0x166f: 0x4000,
+	0x1670: 0x4000, 0x1671: 0x4000, 0x1672: 0x4000, 0x1673: 0x4000, 0x1674: 0x4000, 0x1675: 0x4000,
+	0x1676: 0x4000, 0x1677: 0x4000, 0x1678: 0x4000, 0x1679: 0x4000, 0x167a: 0x4000, 0x167b: 0x4000,
+	0x167c: 0x4000, 0x167f: 0x4000,
+	// Block 0x5a, offset 0x1680
+	0x1680: 0x4000, 0x1681: 0x4000, 0x1682: 0x4000, 0x1683: 0x4000, 0x1684: 0x4000, 0x1685: 0x4000,
+	0x1686: 0x4000, 0x1687: 0x4000, 0x1688: 0x4000, 0x1689: 0x4000, 0x168a: 0x4000, 0x168b: 0x4000,
+	0x168c: 0x4000, 0x168d: 0x4000, 0x168e: 0x4000, 0x168f: 0x4000, 0x1690: 0x4000, 0x1691: 0x4000,
+	0x1692: 0x4000, 0x1693: 0x4000, 0x1694: 0x4000, 0x1695: 0x4000, 0x1696: 0x4000, 0x1697: 0x4000,
+	0x1698: 0x4000, 0x1699: 0x4000, 0x169a: 0x4000, 0x169b: 0x4000, 0x169c: 0x4000, 0x169d: 0x4000,
+	0x169e: 0x4000, 0x169f: 0x4000, 0x16a0: 0x4000, 0x16a1: 0x4000, 0x16a2: 0x4000, 0x16a3: 0x4000,
+	0x16a4: 0x4000, 0x16a5: 0x4000, 0x16a6: 0x4000, 0x16a7: 0x4000, 0x16a8: 0x4000, 0x16a9: 0x4000,
+	0x16aa: 0x4000, 0x16ab: 0x4000, 0x16ac: 0x4000, 0x16ad: 0x4000, 0x16ae: 0x4000, 0x16af: 0x4000,
+	0x16b0: 0x4000, 0x16b1: 0x4000, 0x16b2: 0x4000, 0x16b3: 0x4000, 0x16b4: 0x4000, 0x16b5: 0x4000,
+	0x16b6: 0x4000, 0x16b7: 0x4000, 0x16b8: 0x4000, 0x16b9: 0x4000, 0x16ba: 0x4000, 0x16bb: 0x4000,
+	0x16bc: 0x4000, 0x16bd: 0x4000,
+	// Block 0x5b, offset 0x16c0
+	0x16cb: 0x4000,
+	0x16cc: 0x4000, 0x16cd: 0x4000, 0x16ce: 0x4000, 0x16d0: 0x4000, 0x16d1: 0x4000,
+	0x16d2: 0x4000, 0x16d3: 0x4000, 0x16d4: 0x4000, 0x16d5: 0x4000, 0x16d6: 0x4000, 0x16d7: 0x4000,
+	0x16d8: 0x4000, 0x16d9: 0x4000, 0x16da: 0x4000, 0x16db: 0x4000, 0x16dc: 0x4000, 0x16dd: 0x4000,
+	0x16de: 0x4000, 0x16df: 0x4000, 0x16e0: 0x4000, 0x16e1: 0x4000, 0x16e2: 0x4000, 0x16e3: 0x4000,
+	0x16e4: 0x4000, 0x16e5: 0x4000, 0x16e6: 0x4000, 0x16e7: 0x4000,
+	0x16fa: 0x4000,
+	// Block 0x5c, offset 0x1700
+	0x1715: 0x4000, 0x1716: 0x4000,
+	0x1724: 0x4000,
+	// Block 0x5d, offset 0x1740
+	0x177b: 0x4000,
+	0x177c: 0x4000, 0x177d: 0x4000, 0x177e: 0x4000, 0x177f: 0x4000,
+	// Block 0x5e, offset 0x1780
+	0x1780: 0x4000, 0x1781: 0x4000, 0x1782: 0x4000, 0x1783: 0x4000, 0x1784: 0x4000, 0x1785: 0x4000,
+	0x1786: 0x4000, 0x1787: 0x4000, 0x1788: 0x4000, 0x1789: 0x4000, 0x178a: 0x4000, 0x178b: 0x4000,
+	0x178c: 0x4000, 0x178d: 0x4000, 0x178e: 0x4000, 0x178f: 0x4000,
+	// Block 0x5f, offset 0x17c0
+	0x17c0: 0x4000, 0x17c1: 0x4000, 0x17c2: 0x4000, 0x17c3: 0x4000, 0x17c4: 0x4000, 0x17c5: 0x4000,
+	0x17cc: 0x4000, 0x17d0: 0x4000, 0x17d1: 0x4000,
+	0x17d2: 0x4000,
+	0x17eb: 0x4000, 0x17ec: 0x4000,
+	0x17f4: 0x4000, 0x17f5: 0x4000,
+	0x17f6: 0x4000, 0x17f7: 0x4000, 0x17f8: 0x4000,
+	// Block 0x60, offset 0x1800
+	0x1810: 0x4000, 0x1811: 0x4000,
+	0x1812: 0x4000, 0x1813: 0x4000, 0x1814: 0x4000, 0x1815: 0x4000, 0x1816: 0x4000, 0x1817: 0x4000,
+	0x1818: 0x4000, 0x1819: 0x4000, 0x181a: 0x4000, 0x181b: 0x4000, 0x181c: 0x4000, 0x181d: 0x4000,
+	0x181e: 0x4000, 0x181f: 0x4000, 0x1820: 0x4000, 0x1821: 0x4000, 0x1822: 0x4000, 0x1823: 0x4000,
+	0x1824: 0x4000, 0x1825: 0x4000, 0x1826: 0x4000, 0x1827: 0x4000, 0x1828: 0x4000, 0x1829: 0x4000,
+	0x182a: 0x4000, 0x182b: 0x4000, 0x182c: 0x4000, 0x182d: 0x4000, 0x182e: 0x4000, 0x182f: 0x4000,
+	0x1830: 0x4000, 0x1831: 0x4000, 0x1832: 0x4000, 0x1833: 0x4000, 0x1834: 0x4000, 0x1835: 0x4000,
+	0x1836: 0x4000, 0x1837: 0x4000, 0x1838: 0x4000, 0x1839: 0x4000, 0x183a: 0x4000, 0x183b: 0x4000,
+	0x183c: 0x4000, 0x183d: 0x4000, 0x183e: 0x4000,
+	// Block 0x61, offset 0x1840
+	0x1840: 0x4000, 0x1841: 0x4000, 0x1842: 0x4000, 0x1843: 0x4000, 0x1844: 0x4000, 0x1845: 0x4000,
+	0x1846: 0x4000, 0x1847: 0x4000, 0x1848: 0x4000, 0x1849: 0x4000, 0x184a: 0x4000, 0x184b: 0x4000,
+	0x184c: 0x4000, 0x1850: 0x4000, 0x1851: 0x4000,
+	0x1852: 0x4000, 0x1853: 0x4000, 0x1854: 0x4000, 0x1855: 0x4000, 0x1856: 0x4000, 0x1857: 0x4000,
+	0x1858: 0x4000, 0x1859: 0x4000, 0x185a: 0x4000, 0x185b: 0x4000, 0x185c: 0x4000, 0x185d: 0x4000,
+	0x185e: 0x4000, 0x185f: 0x4000, 0x1860: 0x4000, 0x1861: 0x4000, 0x1862: 0x4000, 0x1863: 0x4000,
+	0x1864: 0x4000, 0x1865: 0x4000, 0x1866: 0x4000, 0x1867: 0x4000, 0x1868: 0x4000, 0x1869: 0x4000,
+	0x186a: 0x4000, 0x186b: 0x4000,
+	// Block 0x62, offset 0x1880
+	0x1880: 0x4000, 0x1881: 0x4000, 0x1882: 0x4000, 0x1883: 0x4000, 0x1884: 0x4000, 0x1885: 0x4000,
+	0x1886: 0x4000, 0x1887: 0x4000, 0x1888: 0x4000, 0x1889: 0x4000, 0x188a: 0x4000, 0x188b: 0x4000,
+	0x188c: 0x4000, 0x188d: 0x4000, 0x188e: 0x4000, 0x188f: 0x4000, 0x1890: 0x4000, 0x1891: 0x4000,
+	0x1892: 0x4000, 0x1893: 0x4000, 0x1894: 0x4000, 0x1895: 0x4000, 0x1896: 0x4000, 0x1897: 0x4000,
+	// Block 0x63, offset 0x18c0
+	0x18c0: 0x4000,
+	0x18d0: 0x4000, 0x18d1: 0x4000,
+	0x18d2: 0x4000, 0x18d3: 0x4000, 0x18d4: 0x4000, 0x18d5: 0x4000, 0x18d6: 0x4000, 0x18d7: 0x4000,
+	0x18d8: 0x4000, 0x18d9: 0x4000, 0x18da: 0x4000, 0x18db: 0x4000, 0x18dc: 0x4000, 0x18dd: 0x4000,
+	0x18de: 0x4000, 0x18df: 0x4000, 0x18e0: 0x4000, 0x18e1: 0x4000, 0x18e2: 0x4000, 0x18e3: 0x4000,
+	0x18e4: 0x4000, 0x18e5: 0x4000, 0x18e6: 0x4000,
+	// Block 0x64, offset 0x1900
+	0x1900: 0x2000, 0x1901: 0x2000, 0x1902: 0x2000, 0x1903: 0x2000, 0x1904: 0x2000, 0x1905: 0x2000,
+	0x1906: 0x2000, 0x1907: 0x2000, 0x1908: 0x2000, 0x1909: 0x2000, 0x190a: 0x2000, 0x190b: 0x2000,
+	0x190c: 0x2000, 0x190d: 0x2000, 0x190e: 0x2000, 0x190f: 0x2000, 0x1910: 0x2000, 0x1911: 0x2000,
+	0x1912: 0x2000, 0x1913: 0x2000, 0x1914: 0x2000, 0x1915: 0x2000, 0x1916: 0x2000, 0x1917: 0x2000,
+	0x1918: 0x2000, 0x1919: 0x2000, 0x191a: 0x2000, 0x191b: 0x2000, 0x191c: 0x2000, 0x191d: 0x2000,
+	0x191e: 0x2000, 0x191f: 0x2000, 0x1920: 0x2000, 0x1921: 0x2000, 0x1922: 0x2000, 0x1923: 0x2000,
+	0x1924: 0x2000, 0x1925: 0x2000, 0x1926: 0x2000, 0x1927: 0x2000, 0x1928: 0x2000, 0x1929: 0x2000,
+	0x192a: 0x2000, 0x192b: 0x2000, 0x192c: 0x2000, 0x192d: 0x2000, 0x192e: 0x2000, 0x192f: 0x2000,
+	0x1930: 0x2000, 0x1931: 0x2000, 0x1932: 0x2000, 0x1933: 0x2000, 0x1934: 0x2000, 0x1935: 0x2000,
+	0x1936: 0x2000, 0x1937: 0x2000, 0x1938: 0x2000, 0x1939: 0x2000, 0x193a: 0x2000, 0x193b: 0x2000,
+	0x193c: 0x2000, 0x193d: 0x2000,
+}
+
+// widthIndex: 22 blocks, 1408 entries, 1408 bytes
+// Block 0 is the zero block.
+var widthIndex = [1408]uint8{
+	// Block 0x0, offset 0x0
+	// Block 0x1, offset 0x40
+	// Block 0x2, offset 0x80
+	// Block 0x3, offset 0xc0
+	0xc2: 0x01, 0xc3: 0x02, 0xc4: 0x03, 0xc5: 0x04, 0xc7: 0x05,
+	0xc9: 0x06, 0xcb: 0x07, 0xcc: 0x08, 0xcd: 0x09, 0xce: 0x0a, 0xcf: 0x0b,
+	0xd0: 0x0c, 0xd1: 0x0d,
+	0xe1: 0x02, 0xe2: 0x03, 0xe3: 0x04, 0xe4: 0x05, 0xe5: 0x06, 0xe6: 0x06, 0xe7: 0x06,
+	0xe8: 0x06, 0xe9: 0x06, 0xea: 0x07, 0xeb: 0x06, 0xec: 0x06, 0xed: 0x08, 0xee: 0x09, 0xef: 0x0a,
+	0xf0: 0x0f, 0xf3: 0x12, 0xf4: 0x13,
+	// Block 0x4, offset 0x100
+	0x104: 0x0e, 0x105: 0x0f,
+	// Block 0x5, offset 0x140
+	0x140: 0x10, 0x141: 0x11, 0x142: 0x12, 0x144: 0x13, 0x145: 0x14, 0x146: 0x15, 0x147: 0x16,
+	0x148: 0x17, 0x149: 0x18, 0x14a: 0x19, 0x14c: 0x1a, 0x14f: 0x1b,
+	0x151: 0x1c, 0x152: 0x08, 0x153: 0x1d, 0x154: 0x1e, 0x155: 0x1f, 0x156: 0x20, 0x157: 0x21,
+	0x158: 0x22, 0x159: 0x23, 0x15a: 0x24, 0x15b: 0x25, 0x15c: 0x26, 0x15d: 0x27, 0x15e: 0x28, 0x15f: 0x29,
+	0x166: 0x2a,
+	0x16c: 0x2b, 0x16d: 0x2c,
+	0x17a: 0x2d, 0x17b: 0x2e, 0x17c: 0x0e, 0x17d: 0x0e, 0x17e: 0x0e, 0x17f: 0x2f,
+	// Block 0x6, offset 0x180
+	0x180: 0x30, 0x181: 0x31, 0x182: 0x32, 0x183: 0x33, 0x184: 0x34, 0x185: 0x35, 0x186: 0x36, 0x187: 0x37,
+	0x188: 0x38, 0x189: 0x39, 0x18a: 0x0e, 0x18b: 0x3a, 0x18c: 0x0e, 0x18d: 0x0e, 0x18e: 0x0e, 0x18f: 0x0e,
+	0x190: 0x0e, 0x191: 0x0e, 0x192: 0x0e, 0x193: 0x0e, 0x194: 0x0e, 0x195: 0x0e, 0x196: 0x0e, 0x197: 0x0e,
+	0x198: 0x0e, 0x199: 0x0e, 0x19a: 0x0e, 0x19b: 0x0e, 0x19c: 0x0e, 0x19d: 0x0e, 0x19e: 0x0e, 0x19f: 0x0e,
+	0x1a0: 0x0e, 0x1a1: 0x0e, 0x1a2: 0x0e, 0x1a3: 0x0e, 0x1a4: 0x0e, 0x1a5: 0x0e, 0x1a6: 0x0e, 0x1a7: 0x0e,
+	0x1a8: 0x0e, 0x1a9: 0x0e, 0x1aa: 0x0e, 0x1ab: 0x0e, 0x1ac: 0x0e, 0x1ad: 0x0e, 0x1ae: 0x0e, 0x1af: 0x0e,
+	0x1b0: 0x0e, 0x1b1: 0x0e, 0x1b2: 0x0e, 0x1b3: 0x0e, 0x1b4: 0x0e, 0x1b5: 0x0e, 0x1b6: 0x0e, 0x1b7: 0x0e,
+	0x1b8: 0x0e, 0x1b9: 0x0e, 0x1ba: 0x0e, 0x1bb: 0x0e, 0x1bc: 0x0e, 0x1bd: 0x0e, 0x1be: 0x0e, 0x1bf: 0x0e,
+	// Block 0x7, offset 0x1c0
+	0x1c0: 0x0e, 0x1c1: 0x0e, 0x1c2: 0x0e, 0x1c3: 0x0e, 0x1c4: 0x0e, 0x1c5: 0x0e, 0x1c6: 0x0e, 0x1c7: 0x0e,
+	0x1c8: 0x0e, 0x1c9: 0x0e, 0x1ca: 0x0e, 0x1cb: 0x0e, 0x1cc: 0x0e, 0x1cd: 0x0e, 0x1ce: 0x0e, 0x1cf: 0x0e,
+	0x1d0: 0x0e, 0x1d1: 0x0e, 0x1d2: 0x0e, 0x1d3: 0x0e, 0x1d4: 0x0e, 0x1d5: 0x0e, 0x1d6: 0x0e, 0x1d7: 0x0e,
+	0x1d8: 0x0e, 0x1d9: 0x0e, 0x1da: 0x0e, 0x1db: 0x0e, 0x1dc: 0x0e, 0x1dd: 0x0e, 0x1de: 0x0e, 0x1df: 0x0e,
+	0x1e0: 0x0e, 0x1e1: 0x0e, 0x1e2: 0x0e, 0x1e3: 0x0e, 0x1e4: 0x0e, 0x1e5: 0x0e, 0x1e6: 0x0e, 0x1e7: 0x0e,
+	0x1e8: 0x0e, 0x1e9: 0x0e, 0x1ea: 0x0e, 0x1eb: 0x0e, 0x1ec: 0x0e, 0x1ed: 0x0e, 0x1ee: 0x0e, 0x1ef: 0x0e,
+	0x1f0: 0x0e, 0x1f1: 0x0e, 0x1f2: 0x0e, 0x1f3: 0x0e, 0x1f4: 0x0e, 0x1f5: 0x0e, 0x1f6: 0x0e,
+	0x1f8: 0x0e, 0x1f9: 0x0e, 0x1fa: 0x0e, 0x1fb: 0x0e, 0x1fc: 0x0e, 0x1fd: 0x0e, 0x1fe: 0x0e, 0x1ff: 0x0e,
+	// Block 0x8, offset 0x200
+	0x200: 0x0e, 0x201: 0x0e, 0x202: 0x0e, 0x203: 0x0e, 0x204: 0x0e, 0x205: 0x0e, 0x206: 0x0e, 0x207: 0x0e,
+	0x208: 0x0e, 0x209: 0x0e, 0x20a: 0x0e, 0x20b: 0x0e, 0x20c: 0x0e, 0x20d: 0x0e, 0x20e: 0x0e, 0x20f: 0x0e,
+	0x210: 0x0e, 0x211: 0x0e, 0x212: 0x0e, 0x213: 0x0e, 0x214: 0x0e, 0x215: 0x0e, 0x216: 0x0e, 0x217: 0x0e,
+	0x218: 0x0e, 0x219: 0x0e, 0x21a: 0x0e, 0x21b: 0x0e, 0x21c: 0x0e, 0x21d: 0x0e, 0x21e: 0x0e, 0x21f: 0x0e,
+	0x220: 0x0e, 0x221: 0x0e, 0x222: 0x0e, 0x223: 0x0e, 0x224: 0x0e, 0x225: 0x0e, 0x226: 0x0e, 0x227: 0x0e,
+	0x228: 0x0e, 0x229: 0x0e, 0x22a: 0x0e, 0x22b: 0x0e, 0x22c: 0x0e, 0x22d: 0x0e, 0x22e: 0x0e, 0x22f: 0x0e,
+	0x230: 0x0e, 0x231: 0x0e, 0x232: 0x0e, 0x233: 0x0e, 0x234: 0x0e, 0x235: 0x0e, 0x236: 0x0e, 0x237: 0x0e,
+	0x238: 0x0e, 0x239: 0x0e, 0x23a: 0x0e, 0x23b: 0x0e, 0x23c: 0x0e, 0x23d: 0x0e, 0x23e: 0x0e, 0x23f: 0x0e,
+	// Block 0x9, offset 0x240
+	0x240: 0x0e, 0x241: 0x0e, 0x242: 0x0e, 0x243: 0x0e, 0x244: 0x0e, 0x245: 0x0e, 0x246: 0x0e, 0x247: 0x0e,
+	0x248: 0x0e, 0x249: 0x0e, 0x24a: 0x0e, 0x24b: 0x0e, 0x24c: 0x0e, 0x24d: 0x0e, 0x24e: 0x0e, 0x24f: 0x0e,
+	0x250: 0x0e, 0x251: 0x0e, 0x252: 0x3b, 0x253: 0x3c,
+	0x265: 0x3d,
+	0x270: 0x0e, 0x271: 0x0e, 0x272: 0x0e, 0x273: 0x0e, 0x274: 0x0e, 0x275: 0x0e, 0x276: 0x0e, 0x277: 0x0e,
+	0x278: 0x0e, 0x279: 0x0e, 0x27a: 0x0e, 0x27b: 0x0e, 0x27c: 0x0e, 0x27d: 0x0e, 0x27e: 0x0e, 0x27f: 0x0e,
+	// Block 0xa, offset 0x280
+	0x280: 0x0e, 0x281: 0x0e, 0x282: 0x0e, 0x283: 0x0e, 0x284: 0x0e, 0x285: 0x0e, 0x286: 0x0e, 0x287: 0x0e,
+	0x288: 0x0e, 0x289: 0x0e, 0x28a: 0x0e, 0x28b: 0x0e, 0x28c: 0x0e, 0x28d: 0x0e, 0x28e: 0x0e, 0x28f: 0x0e,
+	0x290: 0x0e, 0x291: 0x0e, 0x292: 0x0e, 0x293: 0x0e, 0x294: 0x0e, 0x295: 0x0e, 0x296: 0x0e, 0x297: 0x0e,
+	0x298: 0x0e, 0x299: 0x0e, 0x29a: 0x0e, 0x29b: 0x0e, 0x29c: 0x0e, 0x29d: 0x0e, 0x29e: 0x3e,
+	// Block 0xb, offset 0x2c0
+	0x2c0: 0x08, 0x2c1: 0x08, 0x2c2: 0x08, 0x2c3: 0x08, 0x2c4: 0x08, 0x2c5: 0x08, 0x2c6: 0x08, 0x2c7: 0x08,
+	0x2c8: 0x08, 0x2c9: 0x08, 0x2ca: 0x08, 0x2cb: 0x08, 0x2cc: 0x08, 0x2cd: 0x08, 0x2ce: 0x08, 0x2cf: 0x08,
+	0x2d0: 0x08, 0x2d1: 0x08, 0x2d2: 0x08, 0x2d3: 0x08, 0x2d4: 0x08, 0x2d5: 0x08, 0x2d6: 0x08, 0x2d7: 0x08,
+	0x2d8: 0x08, 0x2d9: 0x08, 0x2da: 0x08, 0x2db: 0x08, 0x2dc: 0x08, 0x2dd: 0x08, 0x2de: 0x08, 0x2df: 0x08,
+	0x2e0: 0x08, 0x2e1: 0x08, 0x2e2: 0x08, 0x2e3: 0x08, 0x2e4: 0x08, 0x2e5: 0x08, 0x2e6: 0x08, 0x2e7: 0x08,
+	0x2e8: 0x08, 0x2e9: 0x08, 0x2ea: 0x08, 0x2eb: 0x08, 0x2ec: 0x08, 0x2ed: 0x08, 0x2ee: 0x08, 0x2ef: 0x08,
+	0x2f0: 0x08, 0x2f1: 0x08, 0x2f2: 0x08, 0x2f3: 0x08, 0x2f4: 0x08, 0x2f5: 0x08, 0x2f6: 0x08, 0x2f7: 0x08,
+	0x2f8: 0x08, 0x2f9: 0x08, 0x2fa: 0x08, 0x2fb: 0x08, 0x2fc: 0x08, 0x2fd: 0x08, 0x2fe: 0x08, 0x2ff: 0x08,
+	// Block 0xc, offset 0x300
+	0x300: 0x08, 0x301: 0x08, 0x302: 0x08, 0x303: 0x08, 0x304: 0x08, 0x305: 0x08, 0x306: 0x08, 0x307: 0x08,
+	0x308: 0x08, 0x309: 0x08, 0x30a: 0x08, 0x30b: 0x08, 0x30c: 0x08, 0x30d: 0x08, 0x30e: 0x08, 0x30f: 0x08,
+	0x310: 0x08, 0x311: 0x08, 0x312: 0x08, 0x313: 0x08, 0x314: 0x08, 0x315: 0x08, 0x316: 0x08, 0x317: 0x08,
+	0x318: 0x08, 0x319: 0x08, 0x31a: 0x08, 0x31b: 0x08, 0x31c: 0x08, 0x31d: 0x08, 0x31e: 0x08, 0x31f: 0x08,
+	0x320: 0x08, 0x321: 0x08, 0x322: 0x08, 0x323: 0x08, 0x324: 0x0e, 0x325: 0x0e, 0x326: 0x0e, 0x327: 0x0e,
+	0x328: 0x0e, 0x329: 0x0e, 0x32a: 0x0e, 0x32b: 0x0e,
+	0x338: 0x3f, 0x339: 0x40, 0x33c: 0x41, 0x33d: 0x42, 0x33e: 0x43, 0x33f: 0x44,
+	// Block 0xd, offset 0x340
+	0x37f: 0x45,
+	// Block 0xe, offset 0x380
+	0x380: 0x0e, 0x381: 0x0e, 0x382: 0x0e, 0x383: 0x0e, 0x384: 0x0e, 0x385: 0x0e, 0x386: 0x0e, 0x387: 0x0e,
+	0x388: 0x0e, 0x389: 0x0e, 0x38a: 0x0e, 0x38b: 0x0e, 0x38c: 0x0e, 0x38d: 0x0e, 0x38e: 0x0e, 0x38f: 0x0e,
+	0x390: 0x0e, 0x391: 0x0e, 0x392: 0x0e, 0x393: 0x0e, 0x394: 0x0e, 0x395: 0x0e, 0x396: 0x0e, 0x397: 0x0e,
+	0x398: 0x0e, 0x399: 0x0e, 0x39a: 0x0e, 0x39b: 0x0e, 0x39c: 0x0e, 0x39d: 0x0e, 0x39e: 0x0e, 0x39f: 0x46,
+	0x3a0: 0x0e, 0x3a1: 0x0e, 0x3a2: 0x0e, 0x3a3: 0x0e, 0x3a4: 0x0e, 0x3a5: 0x0e, 0x3a6: 0x0e, 0x3a7: 0x0e,
+	0x3a8: 0x0e, 0x3a9: 0x0e, 0x3aa: 0x0e, 0x3ab: 0x47,
+	// Block 0xf, offset 0x3c0
+	0x3c0: 0x0e, 0x3c1: 0x0e, 0x3c2: 0x0e, 0x3c3: 0x0e, 0x3c4: 0x48, 0x3c5: 0x49, 0x3c6: 0x0e, 0x3c7: 0x0e,
+	0x3c8: 0x0e, 0x3c9: 0x0e, 0x3ca: 0x0e, 0x3cb: 0x4a,
+	// Block 0x10, offset 0x400
+	0x400: 0x4b, 0x403: 0x4c, 0x404: 0x4d, 0x405: 0x4e, 0x406: 0x4f,
+	0x408: 0x50, 0x409: 0x51, 0x40c: 0x52, 0x40d: 0x53, 0x40e: 0x54, 0x40f: 0x55,
+	0x410: 0x3a, 0x411: 0x56, 0x412: 0x0e, 0x413: 0x57, 0x414: 0x58, 0x415: 0x59, 0x416: 0x5a, 0x417: 0x5b,
+	0x418: 0x0e, 0x419: 0x5c, 0x41a: 0x0e, 0x41b: 0x5d,
+	0x424: 0x5e, 0x425: 0x5f, 0x426: 0x60, 0x427: 0x61,
+	// Block 0x11, offset 0x440
+	0x456: 0x0b, 0x457: 0x06,
+	0x458: 0x0c, 0x45b: 0x0d, 0x45f: 0x0e,
+	0x460: 0x06, 0x461: 0x06, 0x462: 0x06, 0x463: 0x06, 0x464: 0x06, 0x465: 0x06, 0x466: 0x06, 0x467: 0x06,
+	0x468: 0x06, 0x469: 0x06, 0x46a: 0x06, 0x46b: 0x06, 0x46c: 0x06, 0x46d: 0x06, 0x46e: 0x06, 0x46f: 0x06,
+	0x470: 0x06, 0x471: 0x06, 0x472: 0x06, 0x473: 0x06, 0x474: 0x06, 0x475: 0x06, 0x476: 0x06, 0x477: 0x06,
+	0x478: 0x06, 0x479: 0x06, 0x47a: 0x06, 0x47b: 0x06, 0x47c: 0x06, 0x47d: 0x06, 0x47e: 0x06, 0x47f: 0x06,
+	// Block 0x12, offset 0x480
+	0x484: 0x08, 0x485: 0x08, 0x486: 0x08, 0x487: 0x09,
+	// Block 0x13, offset 0x4c0
+	0x4c0: 0x08, 0x4c1: 0x08, 0x4c2: 0x08, 0x4c3: 0x08, 0x4c4: 0x08, 0x4c5: 0x08, 0x4c6: 0x08, 0x4c7: 0x08,
+	0x4c8: 0x08, 0x4c9: 0x08, 0x4ca: 0x08, 0x4cb: 0x08, 0x4cc: 0x08, 0x4cd: 0x08, 0x4ce: 0x08, 0x4cf: 0x08,
+	0x4d0: 0x08, 0x4d1: 0x08, 0x4d2: 0x08, 0x4d3: 0x08, 0x4d4: 0x08, 0x4d5: 0x08, 0x4d6: 0x08, 0x4d7: 0x08,
+	0x4d8: 0x08, 0x4d9: 0x08, 0x4da: 0x08, 0x4db: 0x08, 0x4dc: 0x08, 0x4dd: 0x08, 0x4de: 0x08, 0x4df: 0x08,
+	0x4e0: 0x08, 0x4e1: 0x08, 0x4e2: 0x08, 0x4e3: 0x08, 0x4e4: 0x08, 0x4e5: 0x08, 0x4e6: 0x08, 0x4e7: 0x08,
+	0x4e8: 0x08, 0x4e9: 0x08, 0x4ea: 0x08, 0x4eb: 0x08, 0x4ec: 0x08, 0x4ed: 0x08, 0x4ee: 0x08, 0x4ef: 0x08,
+	0x4f0: 0x08, 0x4f1: 0x08, 0x4f2: 0x08, 0x4f3: 0x08, 0x4f4: 0x08, 0x4f5: 0x08, 0x4f6: 0x08, 0x4f7: 0x08,
+	0x4f8: 0x08, 0x4f9: 0x08, 0x4fa: 0x08, 0x4fb: 0x08, 0x4fc: 0x08, 0x4fd: 0x08, 0x4fe: 0x08, 0x4ff: 0x62,
+	// Block 0x14, offset 0x500
+	0x520: 0x10,
+	0x530: 0x09, 0x531: 0x09, 0x532: 0x09, 0x533: 0x09, 0x534: 0x09, 0x535: 0x09, 0x536: 0x09, 0x537: 0x09,
+	0x538: 0x09, 0x539: 0x09, 0x53a: 0x09, 0x53b: 0x09, 0x53c: 0x09, 0x53d: 0x09, 0x53e: 0x09, 0x53f: 0x11,
+	// Block 0x15, offset 0x540
+	0x540: 0x09, 0x541: 0x09, 0x542: 0x09, 0x543: 0x09, 0x544: 0x09, 0x545: 0x09, 0x546: 0x09, 0x547: 0x09,
+	0x548: 0x09, 0x549: 0x09, 0x54a: 0x09, 0x54b: 0x09, 0x54c: 0x09, 0x54d: 0x09, 0x54e: 0x09, 0x54f: 0x11,
+}
+
+// inverseData contains 4-byte entries of the following format:
+//   <length> <modified UTF-8-encoded rune> <0 padding>
+// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the
+// UTF-8 encoding of the original rune. Mappings often have the following
+// pattern:
+//   A -> A  (U+FF21 -> U+0041)
+//   B -> B  (U+FF22 -> U+0042)
+//   ...
+// By xor-ing the last byte the same entry can be shared by many mappings. This
+// reduces the total number of distinct entries by about two thirds.
+// The resulting entry for the aforementioned mappings is
+//   { 0x01, 0xE0, 0x00, 0x00 }
+// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get
+//   E0 ^ A1 = 41.
+// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get
+//   E0 ^ A2 = 42.
+// Note that because of the xor-ing, the byte sequence stored in the entry is
+// not valid UTF-8.
+var inverseData = [150][4]byte{
+	{0x00, 0x00, 0x00, 0x00},
+	{0x03, 0xe3, 0x80, 0xa0},
+	{0x03, 0xef, 0xbc, 0xa0},
+	{0x03, 0xef, 0xbc, 0xe0},
+	{0x03, 0xef, 0xbd, 0xe0},
+	{0x03, 0xef, 0xbf, 0x02},
+	{0x03, 0xef, 0xbf, 0x00},
+	{0x03, 0xef, 0xbf, 0x0e},
+	{0x03, 0xef, 0xbf, 0x0c},
+	{0x03, 0xef, 0xbf, 0x0f},
+	{0x03, 0xef, 0xbf, 0x39},
+	{0x03, 0xef, 0xbf, 0x3b},
+	{0x03, 0xef, 0xbf, 0x3f},
+	{0x03, 0xef, 0xbf, 0x2a},
+	{0x03, 0xef, 0xbf, 0x0d},
+	{0x03, 0xef, 0xbf, 0x25},
+	{0x03, 0xef, 0xbd, 0x1a},
+	{0x03, 0xef, 0xbd, 0x26},
+	{0x01, 0xa0, 0x00, 0x00},
+	{0x03, 0xef, 0xbd, 0x25},
+	{0x03, 0xef, 0xbd, 0x23},
+	{0x03, 0xef, 0xbd, 0x2e},
+	{0x03, 0xef, 0xbe, 0x07},
+	{0x03, 0xef, 0xbe, 0x05},
+	{0x03, 0xef, 0xbd, 0x06},
+	{0x03, 0xef, 0xbd, 0x13},
+	{0x03, 0xef, 0xbd, 0x0b},
+	{0x03, 0xef, 0xbd, 0x16},
+	{0x03, 0xef, 0xbd, 0x0c},
+	{0x03, 0xef, 0xbd, 0x15},
+	{0x03, 0xef, 0xbd, 0x0d},
+	{0x03, 0xef, 0xbd, 0x1c},
+	{0x03, 0xef, 0xbd, 0x02},
+	{0x03, 0xef, 0xbd, 0x1f},
+	{0x03, 0xef, 0xbd, 0x1d},
+	{0x03, 0xef, 0xbd, 0x17},
+	{0x03, 0xef, 0xbd, 0x08},
+	{0x03, 0xef, 0xbd, 0x09},
+	{0x03, 0xef, 0xbd, 0x0e},
+	{0x03, 0xef, 0xbd, 0x04},
+	{0x03, 0xef, 0xbd, 0x05},
+	{0x03, 0xef, 0xbe, 0x3f},
+	{0x03, 0xef, 0xbe, 0x00},
+	{0x03, 0xef, 0xbd, 0x2c},
+	{0x03, 0xef, 0xbe, 0x06},
+	{0x03, 0xef, 0xbe, 0x0c},
+	{0x03, 0xef, 0xbe, 0x0f},
+	{0x03, 0xef, 0xbe, 0x0d},
+	{0x03, 0xef, 0xbe, 0x0b},
+	{0x03, 0xef, 0xbe, 0x19},
+	{0x03, 0xef, 0xbe, 0x15},
+	{0x03, 0xef, 0xbe, 0x11},
+	{0x03, 0xef, 0xbe, 0x31},
+	{0x03, 0xef, 0xbe, 0x33},
+	{0x03, 0xef, 0xbd, 0x0f},
+	{0x03, 0xef, 0xbe, 0x30},
+	{0x03, 0xef, 0xbe, 0x3e},
+	{0x03, 0xef, 0xbe, 0x32},
+	{0x03, 0xef, 0xbe, 0x36},
+	{0x03, 0xef, 0xbd, 0x14},
+	{0x03, 0xef, 0xbe, 0x2e},
+	{0x03, 0xef, 0xbd, 0x1e},
+	{0x03, 0xef, 0xbe, 0x10},
+	{0x03, 0xef, 0xbf, 0x13},
+	{0x03, 0xef, 0xbf, 0x15},
+	{0x03, 0xef, 0xbf, 0x17},
+	{0x03, 0xef, 0xbf, 0x1f},
+	{0x03, 0xef, 0xbf, 0x1d},
+	{0x03, 0xef, 0xbf, 0x1b},
+	{0x03, 0xef, 0xbf, 0x09},
+	{0x03, 0xef, 0xbf, 0x0b},
+	{0x03, 0xef, 0xbf, 0x37},
+	{0x03, 0xef, 0xbe, 0x04},
+	{0x01, 0xe0, 0x00, 0x00},
+	{0x03, 0xe2, 0xa6, 0x1a},
+	{0x03, 0xe2, 0xa6, 0x26},
+	{0x03, 0xe3, 0x80, 0x23},
+	{0x03, 0xe3, 0x80, 0x2e},
+	{0x03, 0xe3, 0x80, 0x25},
+	{0x03, 0xe3, 0x83, 0x1e},
+	{0x03, 0xe3, 0x83, 0x14},
+	{0x03, 0xe3, 0x82, 0x06},
+	{0x03, 0xe3, 0x82, 0x0b},
+	{0x03, 0xe3, 0x82, 0x0c},
+	{0x03, 0xe3, 0x82, 0x0d},
+	{0x03, 0xe3, 0x82, 0x02},
+	{0x03, 0xe3, 0x83, 0x0f},
+	{0x03, 0xe3, 0x83, 0x08},
+	{0x03, 0xe3, 0x83, 0x09},
+	{0x03, 0xe3, 0x83, 0x2c},
+	{0x03, 0xe3, 0x83, 0x0c},
+	{0x03, 0xe3, 0x82, 0x13},
+	{0x03, 0xe3, 0x82, 0x16},
+	{0x03, 0xe3, 0x82, 0x15},
+	{0x03, 0xe3, 0x82, 0x1c},
+	{0x03, 0xe3, 0x82, 0x1f},
+	{0x03, 0xe3, 0x82, 0x1d},
+	{0x03, 0xe3, 0x82, 0x1a},
+	{0x03, 0xe3, 0x82, 0x17},
+	{0x03, 0xe3, 0x82, 0x08},
+	{0x03, 0xe3, 0x82, 0x09},
+	{0x03, 0xe3, 0x82, 0x0e},
+	{0x03, 0xe3, 0x82, 0x04},
+	{0x03, 0xe3, 0x82, 0x05},
+	{0x03, 0xe3, 0x82, 0x3f},
+	{0x03, 0xe3, 0x83, 0x00},
+	{0x03, 0xe3, 0x83, 0x06},
+	{0x03, 0xe3, 0x83, 0x05},
+	{0x03, 0xe3, 0x83, 0x0d},
+	{0x03, 0xe3, 0x83, 0x0b},
+	{0x03, 0xe3, 0x83, 0x07},
+	{0x03, 0xe3, 0x83, 0x19},
+	{0x03, 0xe3, 0x83, 0x15},
+	{0x03, 0xe3, 0x83, 0x11},
+	{0x03, 0xe3, 0x83, 0x31},
+	{0x03, 0xe3, 0x83, 0x33},
+	{0x03, 0xe3, 0x83, 0x30},
+	{0x03, 0xe3, 0x83, 0x3e},
+	{0x03, 0xe3, 0x83, 0x32},
+	{0x03, 0xe3, 0x83, 0x36},
+	{0x03, 0xe3, 0x83, 0x2e},
+	{0x03, 0xe3, 0x82, 0x07},
+	{0x03, 0xe3, 0x85, 0x04},
+	{0x03, 0xe3, 0x84, 0x10},
+	{0x03, 0xe3, 0x85, 0x30},
+	{0x03, 0xe3, 0x85, 0x0d},
+	{0x03, 0xe3, 0x85, 0x13},
+	{0x03, 0xe3, 0x85, 0x15},
+	{0x03, 0xe3, 0x85, 0x17},
+	{0x03, 0xe3, 0x85, 0x1f},
+	{0x03, 0xe3, 0x85, 0x1d},
+	{0x03, 0xe3, 0x85, 0x1b},
+	{0x03, 0xe3, 0x85, 0x09},
+	{0x03, 0xe3, 0x85, 0x0f},
+	{0x03, 0xe3, 0x85, 0x0b},
+	{0x03, 0xe3, 0x85, 0x37},
+	{0x03, 0xe3, 0x85, 0x3b},
+	{0x03, 0xe3, 0x85, 0x39},
+	{0x03, 0xe3, 0x85, 0x3f},
+	{0x02, 0xc2, 0x02, 0x00},
+	{0x02, 0xc2, 0x0e, 0x00},
+	{0x02, 0xc2, 0x0c, 0x00},
+	{0x02, 0xc2, 0x00, 0x00},
+	{0x03, 0xe2, 0x82, 0x0f},
+	{0x03, 0xe2, 0x94, 0x2a},
+	{0x03, 0xe2, 0x86, 0x39},
+	{0x03, 0xe2, 0x86, 0x3b},
+	{0x03, 0xe2, 0x86, 0x3f},
+	{0x03, 0xe2, 0x96, 0x0d},
+	{0x03, 0xe2, 0x97, 0x25},
+}
+
+// Total table size 14936 bytes (14KiB)
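
Editorial aside (not part of the vendored file or this patch): the inverseData comment above describes how one 4-byte entry serves many width mappings by xor-ing the final UTF-8 byte. The standalone sketch below, with an illustrative helper name of my own, reproduces that arithmetic for the { 0x01, 0xE0, 0x00, 0x00 } entry cited in the comment.

	package main

	import "fmt"

	// applyInverse recovers the final byte of the mapped rune from an
	// inverseData-style entry: the last payload byte (entry[1..length])
	// is xor-ed with the last byte of the original rune's UTF-8 encoding.
	// For the length-1 entry used here, that byte is the whole result.
	// Illustrative only; not an API of golang.org/x/text.
	func applyInverse(entry [4]byte, origLastByte byte) byte {
		payloadLen := int(entry[0])             // <length> field
		return entry[payloadLen] ^ origLastByte // xor the final bytes
	}

	func main() {
		entry := [4]byte{0x01, 0xE0, 0x00, 0x00} // shared entry from the comment

		// U+FF21 (FULLWIDTH A) is UTF-8 EF BC A1: 0xE0 ^ 0xA1 = 0x41 = 'A'.
		fmt.Printf("%c\n", applyInverse(entry, 0xA1))
		// U+FF22 (FULLWIDTH B) ends in A2: 0xE0 ^ 0xA2 = 0x42 = 'B'.
		fmt.Printf("%c\n", applyInverse(entry, 0xA2))
	}
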
diff --git a/vendor/golang.org/x/text/width/tables11.0.0.go b/vendor/golang.org/x/text/width/tables11.0.0.go
new file mode 100644
index 00000000..d6def0e7
--- /dev/null
+++ b/vendor/golang.org/x/text/width/tables11.0.0.go
@@ -0,0 +1,1330 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+// +build go1.13
+
+package width
+
+// UnicodeVersion is the Unicode version from which the tables in this package are derived.
+const UnicodeVersion = "11.0.0"
+
+// lookup returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *widthTrie) lookup(s []byte) (v uint16, sz int) {
+	c0 := s[0]
+	switch {
+	case c0 < 0x80: // is ASCII
+		return widthValues[c0], 1
+	case c0 < 0xC2:
+		return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+	case c0 < 0xE0: // 2-byte UTF-8
+		if len(s) < 2 {
+			return 0, 0
+		}
+		i := widthIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c1), 2
+	case c0 < 0xF0: // 3-byte UTF-8
+		if len(s) < 3 {
+			return 0, 0
+		}
+		i := widthIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		o := uint32(i)<<6 + uint32(c1)
+		i = widthIndex[o]
+		c2 := s[2]
+		if c2 < 0x80 || 0xC0 <= c2 {
+			return 0, 2 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c2), 3
+	case c0 < 0xF8: // 4-byte UTF-8
+		if len(s) < 4 {
+			return 0, 0
+		}
+		i := widthIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		o := uint32(i)<<6 + uint32(c1)
+		i = widthIndex[o]
+		c2 := s[2]
+		if c2 < 0x80 || 0xC0 <= c2 {
+			return 0, 2 // Illegal UTF-8: not a continuation byte.
+		}
+		o = uint32(i)<<6 + uint32(c2)
+		i = widthIndex[o]
+		c3 := s[3]
+		if c3 < 0x80 || 0xC0 <= c3 {
+			return 0, 3 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c3), 4
+	}
+	// Illegal rune
+	return 0, 1
+}
+
+// lookupUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *widthTrie) lookupUnsafe(s []byte) uint16 {
+	c0 := s[0]
+	if c0 < 0x80 { // is ASCII
+		return widthValues[c0]
+	}
+	i := widthIndex[c0]
+	if c0 < 0xE0 { // 2-byte UTF-8
+		return t.lookupValue(uint32(i), s[1])
+	}
+	i = widthIndex[uint32(i)<<6+uint32(s[1])]
+	if c0 < 0xF0 { // 3-byte UTF-8
+		return t.lookupValue(uint32(i), s[2])
+	}
+	i = widthIndex[uint32(i)<<6+uint32(s[2])]
+	if c0 < 0xF8 { // 4-byte UTF-8
+		return t.lookupValue(uint32(i), s[3])
+	}
+	return 0
+}
+
+// lookupString returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *widthTrie) lookupString(s string) (v uint16, sz int) {
+	c0 := s[0]
+	switch {
+	case c0 < 0x80: // is ASCII
+		return widthValues[c0], 1
+	case c0 < 0xC2:
+		return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+	case c0 < 0xE0: // 2-byte UTF-8
+		if len(s) < 2 {
+			return 0, 0
+		}
+		i := widthIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c1), 2
+	case c0 < 0xF0: // 3-byte UTF-8
+		if len(s) < 3 {
+			return 0, 0
+		}
+		i := widthIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		o := uint32(i)<<6 + uint32(c1)
+		i = widthIndex[o]
+		c2 := s[2]
+		if c2 < 0x80 || 0xC0 <= c2 {
+			return 0, 2 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c2), 3
+	case c0 < 0xF8: // 4-byte UTF-8
+		if len(s) < 4 {
+			return 0, 0
+		}
+		i := widthIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		o := uint32(i)<<6 + uint32(c1)
+		i = widthIndex[o]
+		c2 := s[2]
+		if c2 < 0x80 || 0xC0 <= c2 {
+			return 0, 2 // Illegal UTF-8: not a continuation byte.
+		}
+		o = uint32(i)<<6 + uint32(c2)
+		i = widthIndex[o]
+		c3 := s[3]
+		if c3 < 0x80 || 0xC0 <= c3 {
+			return 0, 3 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c3), 4
+	}
+	// Illegal rune
+	return 0, 1
+}
+
+// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *widthTrie) lookupStringUnsafe(s string) uint16 {
+	c0 := s[0]
+	if c0 < 0x80 { // is ASCII
+		return widthValues[c0]
+	}
+	i := widthIndex[c0]
+	if c0 < 0xE0 { // 2-byte UTF-8
+		return t.lookupValue(uint32(i), s[1])
+	}
+	i = widthIndex[uint32(i)<<6+uint32(s[1])]
+	if c0 < 0xF0 { // 3-byte UTF-8
+		return t.lookupValue(uint32(i), s[2])
+	}
+	i = widthIndex[uint32(i)<<6+uint32(s[2])]
+	if c0 < 0xF8 { // 4-byte UTF-8
+		return t.lookupValue(uint32(i), s[3])
+	}
+	return 0
+}
+
+// widthTrie. Total size: 14336 bytes (14.00 KiB). Checksum: c0f7712776e71cd4.
+type widthTrie struct{}
+
+func newWidthTrie(i int) *widthTrie {
+	return &widthTrie{}
+}
+
+// lookupValue determines the type of block n and looks up the value for b.
+func (t *widthTrie) lookupValue(n uint32, b byte) uint16 {
+	switch {
+	default:
+		return uint16(widthValues[n<<6+uint32(b)])
+	}
+}
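+
+// Editorial sketch (not part of the generated file): the lookup methods above
+// walk the trie one byte at a time, shifting the current index left by six
+// and adding the next continuation byte before finally reading widthValues.
+// Assuming the widthIndex/widthValues tables defined in this file, the walk
+// for a 3-byte rune such as U+3000 (UTF-8 E3 80 80) amounts to:
+//
+//	s := []byte("\u3000")                       // IDEOGRAPHIC SPACE
+//	i := widthIndex[s[0]]                       // first level, lead byte 0xE3
+//	i = widthIndex[uint32(i)<<6+uint32(s[1])]   // second level, byte 0x80
+//	v := widthValues[uint32(i)<<6+uint32(s[2])] // final value for the rune
+//
+// which is what lookup returns as (v, 3); a returned size of 0 means s was
+// too short to hold the complete encoding.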
+
+// widthValues: 101 blocks, 6464 entries, 12928 bytes
+// The third block is the zero block.
+var widthValues = [6464]uint16{
+	// Block 0x0, offset 0x0
+	0x20: 0x6001, 0x21: 0x6002, 0x22: 0x6002, 0x23: 0x6002,
+	0x24: 0x6002, 0x25: 0x6002, 0x26: 0x6002, 0x27: 0x6002, 0x28: 0x6002, 0x29: 0x6002,
+	0x2a: 0x6002, 0x2b: 0x6002, 0x2c: 0x6002, 0x2d: 0x6002, 0x2e: 0x6002, 0x2f: 0x6002,
+	0x30: 0x6002, 0x31: 0x6002, 0x32: 0x6002, 0x33: 0x6002, 0x34: 0x6002, 0x35: 0x6002,
+	0x36: 0x6002, 0x37: 0x6002, 0x38: 0x6002, 0x39: 0x6002, 0x3a: 0x6002, 0x3b: 0x6002,
+	0x3c: 0x6002, 0x3d: 0x6002, 0x3e: 0x6002, 0x3f: 0x6002,
+	// Block 0x1, offset 0x40
+	0x40: 0x6003, 0x41: 0x6003, 0x42: 0x6003, 0x43: 0x6003, 0x44: 0x6003, 0x45: 0x6003,
+	0x46: 0x6003, 0x47: 0x6003, 0x48: 0x6003, 0x49: 0x6003, 0x4a: 0x6003, 0x4b: 0x6003,
+	0x4c: 0x6003, 0x4d: 0x6003, 0x4e: 0x6003, 0x4f: 0x6003, 0x50: 0x6003, 0x51: 0x6003,
+	0x52: 0x6003, 0x53: 0x6003, 0x54: 0x6003, 0x55: 0x6003, 0x56: 0x6003, 0x57: 0x6003,
+	0x58: 0x6003, 0x59: 0x6003, 0x5a: 0x6003, 0x5b: 0x6003, 0x5c: 0x6003, 0x5d: 0x6003,
+	0x5e: 0x6003, 0x5f: 0x6003, 0x60: 0x6004, 0x61: 0x6004, 0x62: 0x6004, 0x63: 0x6004,
+	0x64: 0x6004, 0x65: 0x6004, 0x66: 0x6004, 0x67: 0x6004, 0x68: 0x6004, 0x69: 0x6004,
+	0x6a: 0x6004, 0x6b: 0x6004, 0x6c: 0x6004, 0x6d: 0x6004, 0x6e: 0x6004, 0x6f: 0x6004,
+	0x70: 0x6004, 0x71: 0x6004, 0x72: 0x6004, 0x73: 0x6004, 0x74: 0x6004, 0x75: 0x6004,
+	0x76: 0x6004, 0x77: 0x6004, 0x78: 0x6004, 0x79: 0x6004, 0x7a: 0x6004, 0x7b: 0x6004,
+	0x7c: 0x6004, 0x7d: 0x6004, 0x7e: 0x6004,
+	// Block 0x2, offset 0x80
+	// Block 0x3, offset 0xc0
+	0xe1: 0x2000, 0xe2: 0x6005, 0xe3: 0x6005,
+	0xe4: 0x2000, 0xe5: 0x6006, 0xe6: 0x6005, 0xe7: 0x2000, 0xe8: 0x2000,
+	0xea: 0x2000, 0xec: 0x6007, 0xed: 0x2000, 0xee: 0x2000, 0xef: 0x6008,
+	0xf0: 0x2000, 0xf1: 0x2000, 0xf2: 0x2000, 0xf3: 0x2000, 0xf4: 0x2000,
+	0xf6: 0x2000, 0xf7: 0x2000, 0xf8: 0x2000, 0xf9: 0x2000, 0xfa: 0x2000,
+	0xfc: 0x2000, 0xfd: 0x2000, 0xfe: 0x2000, 0xff: 0x2000,
+	// Block 0x4, offset 0x100
+	0x106: 0x2000,
+	0x110: 0x2000,
+	0x117: 0x2000,
+	0x118: 0x2000,
+	0x11e: 0x2000, 0x11f: 0x2000, 0x120: 0x2000, 0x121: 0x2000,
+	0x126: 0x2000, 0x128: 0x2000, 0x129: 0x2000,
+	0x12a: 0x2000, 0x12c: 0x2000, 0x12d: 0x2000,
+	0x130: 0x2000, 0x132: 0x2000, 0x133: 0x2000,
+	0x137: 0x2000, 0x138: 0x2000, 0x139: 0x2000, 0x13a: 0x2000,
+	0x13c: 0x2000, 0x13e: 0x2000,
+	// Block 0x5, offset 0x140
+	0x141: 0x2000,
+	0x151: 0x2000,
+	0x153: 0x2000,
+	0x15b: 0x2000,
+	0x166: 0x2000, 0x167: 0x2000,
+	0x16b: 0x2000,
+	0x171: 0x2000, 0x172: 0x2000, 0x173: 0x2000,
+	0x178: 0x2000,
+	0x17f: 0x2000,
+	// Block 0x6, offset 0x180
+	0x180: 0x2000, 0x181: 0x2000, 0x182: 0x2000, 0x184: 0x2000,
+	0x188: 0x2000, 0x189: 0x2000, 0x18a: 0x2000, 0x18b: 0x2000,
+	0x18d: 0x2000,
+	0x192: 0x2000, 0x193: 0x2000,
+	0x1a6: 0x2000, 0x1a7: 0x2000,
+	0x1ab: 0x2000,
+	// Block 0x7, offset 0x1c0
+	0x1ce: 0x2000, 0x1d0: 0x2000,
+	0x1d2: 0x2000, 0x1d4: 0x2000, 0x1d6: 0x2000,
+	0x1d8: 0x2000, 0x1da: 0x2000, 0x1dc: 0x2000,
+	// Block 0x8, offset 0x200
+	0x211: 0x2000,
+	0x221: 0x2000,
+	// Block 0x9, offset 0x240
+	0x244: 0x2000,
+	0x247: 0x2000, 0x249: 0x2000, 0x24a: 0x2000, 0x24b: 0x2000,
+	0x24d: 0x2000, 0x250: 0x2000,
+	0x258: 0x2000, 0x259: 0x2000, 0x25a: 0x2000, 0x25b: 0x2000, 0x25d: 0x2000,
+	0x25f: 0x2000,
+	// Block 0xa, offset 0x280
+	0x280: 0x2000, 0x281: 0x2000, 0x282: 0x2000, 0x283: 0x2000, 0x284: 0x2000, 0x285: 0x2000,
+	0x286: 0x2000, 0x287: 0x2000, 0x288: 0x2000, 0x289: 0x2000, 0x28a: 0x2000, 0x28b: 0x2000,
+	0x28c: 0x2000, 0x28d: 0x2000, 0x28e: 0x2000, 0x28f: 0x2000, 0x290: 0x2000, 0x291: 0x2000,
+	0x292: 0x2000, 0x293: 0x2000, 0x294: 0x2000, 0x295: 0x2000, 0x296: 0x2000, 0x297: 0x2000,
+	0x298: 0x2000, 0x299: 0x2000, 0x29a: 0x2000, 0x29b: 0x2000, 0x29c: 0x2000, 0x29d: 0x2000,
+	0x29e: 0x2000, 0x29f: 0x2000, 0x2a0: 0x2000, 0x2a1: 0x2000, 0x2a2: 0x2000, 0x2a3: 0x2000,
+	0x2a4: 0x2000, 0x2a5: 0x2000, 0x2a6: 0x2000, 0x2a7: 0x2000, 0x2a8: 0x2000, 0x2a9: 0x2000,
+	0x2aa: 0x2000, 0x2ab: 0x2000, 0x2ac: 0x2000, 0x2ad: 0x2000, 0x2ae: 0x2000, 0x2af: 0x2000,
+	0x2b0: 0x2000, 0x2b1: 0x2000, 0x2b2: 0x2000, 0x2b3: 0x2000, 0x2b4: 0x2000, 0x2b5: 0x2000,
+	0x2b6: 0x2000, 0x2b7: 0x2000, 0x2b8: 0x2000, 0x2b9: 0x2000, 0x2ba: 0x2000, 0x2bb: 0x2000,
+	0x2bc: 0x2000, 0x2bd: 0x2000, 0x2be: 0x2000, 0x2bf: 0x2000,
+	// Block 0xb, offset 0x2c0
+	0x2c0: 0x2000, 0x2c1: 0x2000, 0x2c2: 0x2000, 0x2c3: 0x2000, 0x2c4: 0x2000, 0x2c5: 0x2000,
+	0x2c6: 0x2000, 0x2c7: 0x2000, 0x2c8: 0x2000, 0x2c9: 0x2000, 0x2ca: 0x2000, 0x2cb: 0x2000,
+	0x2cc: 0x2000, 0x2cd: 0x2000, 0x2ce: 0x2000, 0x2cf: 0x2000, 0x2d0: 0x2000, 0x2d1: 0x2000,
+	0x2d2: 0x2000, 0x2d3: 0x2000, 0x2d4: 0x2000, 0x2d5: 0x2000, 0x2d6: 0x2000, 0x2d7: 0x2000,
+	0x2d8: 0x2000, 0x2d9: 0x2000, 0x2da: 0x2000, 0x2db: 0x2000, 0x2dc: 0x2000, 0x2dd: 0x2000,
+	0x2de: 0x2000, 0x2df: 0x2000, 0x2e0: 0x2000, 0x2e1: 0x2000, 0x2e2: 0x2000, 0x2e3: 0x2000,
+	0x2e4: 0x2000, 0x2e5: 0x2000, 0x2e6: 0x2000, 0x2e7: 0x2000, 0x2e8: 0x2000, 0x2e9: 0x2000,
+	0x2ea: 0x2000, 0x2eb: 0x2000, 0x2ec: 0x2000, 0x2ed: 0x2000, 0x2ee: 0x2000, 0x2ef: 0x2000,
+	// Block 0xc, offset 0x300
+	0x311: 0x2000,
+	0x312: 0x2000, 0x313: 0x2000, 0x314: 0x2000, 0x315: 0x2000, 0x316: 0x2000, 0x317: 0x2000,
+	0x318: 0x2000, 0x319: 0x2000, 0x31a: 0x2000, 0x31b: 0x2000, 0x31c: 0x2000, 0x31d: 0x2000,
+	0x31e: 0x2000, 0x31f: 0x2000, 0x320: 0x2000, 0x321: 0x2000, 0x323: 0x2000,
+	0x324: 0x2000, 0x325: 0x2000, 0x326: 0x2000, 0x327: 0x2000, 0x328: 0x2000, 0x329: 0x2000,
+	0x331: 0x2000, 0x332: 0x2000, 0x333: 0x2000, 0x334: 0x2000, 0x335: 0x2000,
+	0x336: 0x2000, 0x337: 0x2000, 0x338: 0x2000, 0x339: 0x2000, 0x33a: 0x2000, 0x33b: 0x2000,
+	0x33c: 0x2000, 0x33d: 0x2000, 0x33e: 0x2000, 0x33f: 0x2000,
+	// Block 0xd, offset 0x340
+	0x340: 0x2000, 0x341: 0x2000, 0x343: 0x2000, 0x344: 0x2000, 0x345: 0x2000,
+	0x346: 0x2000, 0x347: 0x2000, 0x348: 0x2000, 0x349: 0x2000,
+	// Block 0xe, offset 0x380
+	0x381: 0x2000,
+	0x390: 0x2000, 0x391: 0x2000,
+	0x392: 0x2000, 0x393: 0x2000, 0x394: 0x2000, 0x395: 0x2000, 0x396: 0x2000, 0x397: 0x2000,
+	0x398: 0x2000, 0x399: 0x2000, 0x39a: 0x2000, 0x39b: 0x2000, 0x39c: 0x2000, 0x39d: 0x2000,
+	0x39e: 0x2000, 0x39f: 0x2000, 0x3a0: 0x2000, 0x3a1: 0x2000, 0x3a2: 0x2000, 0x3a3: 0x2000,
+	0x3a4: 0x2000, 0x3a5: 0x2000, 0x3a6: 0x2000, 0x3a7: 0x2000, 0x3a8: 0x2000, 0x3a9: 0x2000,
+	0x3aa: 0x2000, 0x3ab: 0x2000, 0x3ac: 0x2000, 0x3ad: 0x2000, 0x3ae: 0x2000, 0x3af: 0x2000,
+	0x3b0: 0x2000, 0x3b1: 0x2000, 0x3b2: 0x2000, 0x3b3: 0x2000, 0x3b4: 0x2000, 0x3b5: 0x2000,
+	0x3b6: 0x2000, 0x3b7: 0x2000, 0x3b8: 0x2000, 0x3b9: 0x2000, 0x3ba: 0x2000, 0x3bb: 0x2000,
+	0x3bc: 0x2000, 0x3bd: 0x2000, 0x3be: 0x2000, 0x3bf: 0x2000,
+	// Block 0xf, offset 0x3c0
+	0x3c0: 0x2000, 0x3c1: 0x2000, 0x3c2: 0x2000, 0x3c3: 0x2000, 0x3c4: 0x2000, 0x3c5: 0x2000,
+	0x3c6: 0x2000, 0x3c7: 0x2000, 0x3c8: 0x2000, 0x3c9: 0x2000, 0x3ca: 0x2000, 0x3cb: 0x2000,
+	0x3cc: 0x2000, 0x3cd: 0x2000, 0x3ce: 0x2000, 0x3cf: 0x2000, 0x3d1: 0x2000,
+	// Block 0x10, offset 0x400
+	0x400: 0x4000, 0x401: 0x4000, 0x402: 0x4000, 0x403: 0x4000, 0x404: 0x4000, 0x405: 0x4000,
+	0x406: 0x4000, 0x407: 0x4000, 0x408: 0x4000, 0x409: 0x4000, 0x40a: 0x4000, 0x40b: 0x4000,
+	0x40c: 0x4000, 0x40d: 0x4000, 0x40e: 0x4000, 0x40f: 0x4000, 0x410: 0x4000, 0x411: 0x4000,
+	0x412: 0x4000, 0x413: 0x4000, 0x414: 0x4000, 0x415: 0x4000, 0x416: 0x4000, 0x417: 0x4000,
+	0x418: 0x4000, 0x419: 0x4000, 0x41a: 0x4000, 0x41b: 0x4000, 0x41c: 0x4000, 0x41d: 0x4000,
+	0x41e: 0x4000, 0x41f: 0x4000, 0x420: 0x4000, 0x421: 0x4000, 0x422: 0x4000, 0x423: 0x4000,
+	0x424: 0x4000, 0x425: 0x4000, 0x426: 0x4000, 0x427: 0x4000, 0x428: 0x4000, 0x429: 0x4000,
+	0x42a: 0x4000, 0x42b: 0x4000, 0x42c: 0x4000, 0x42d: 0x4000, 0x42e: 0x4000, 0x42f: 0x4000,
+	0x430: 0x4000, 0x431: 0x4000, 0x432: 0x4000, 0x433: 0x4000, 0x434: 0x4000, 0x435: 0x4000,
+	0x436: 0x4000, 0x437: 0x4000, 0x438: 0x4000, 0x439: 0x4000, 0x43a: 0x4000, 0x43b: 0x4000,
+	0x43c: 0x4000, 0x43d: 0x4000, 0x43e: 0x4000, 0x43f: 0x4000,
+	// Block 0x11, offset 0x440
+	0x440: 0x4000, 0x441: 0x4000, 0x442: 0x4000, 0x443: 0x4000, 0x444: 0x4000, 0x445: 0x4000,
+	0x446: 0x4000, 0x447: 0x4000, 0x448: 0x4000, 0x449: 0x4000, 0x44a: 0x4000, 0x44b: 0x4000,
+	0x44c: 0x4000, 0x44d: 0x4000, 0x44e: 0x4000, 0x44f: 0x4000, 0x450: 0x4000, 0x451: 0x4000,
+	0x452: 0x4000, 0x453: 0x4000, 0x454: 0x4000, 0x455: 0x4000, 0x456: 0x4000, 0x457: 0x4000,
+	0x458: 0x4000, 0x459: 0x4000, 0x45a: 0x4000, 0x45b: 0x4000, 0x45c: 0x4000, 0x45d: 0x4000,
+	0x45e: 0x4000, 0x45f: 0x4000,
+	// Block 0x12, offset 0x480
+	0x490: 0x2000,
+	0x493: 0x2000, 0x494: 0x2000, 0x495: 0x2000, 0x496: 0x2000,
+	0x498: 0x2000, 0x499: 0x2000, 0x49c: 0x2000, 0x49d: 0x2000,
+	0x4a0: 0x2000, 0x4a1: 0x2000, 0x4a2: 0x2000,
+	0x4a4: 0x2000, 0x4a5: 0x2000, 0x4a6: 0x2000, 0x4a7: 0x2000,
+	0x4b0: 0x2000, 0x4b2: 0x2000, 0x4b3: 0x2000, 0x4b5: 0x2000,
+	0x4bb: 0x2000,
+	0x4be: 0x2000,
+	// Block 0x13, offset 0x4c0
+	0x4f4: 0x2000,
+	0x4ff: 0x2000,
+	// Block 0x14, offset 0x500
+	0x501: 0x2000, 0x502: 0x2000, 0x503: 0x2000, 0x504: 0x2000,
+	0x529: 0xa009,
+	0x52c: 0x2000,
+	// Block 0x15, offset 0x540
+	0x543: 0x2000, 0x545: 0x2000,
+	0x549: 0x2000,
+	0x553: 0x2000, 0x556: 0x2000,
+	0x561: 0x2000, 0x562: 0x2000,
+	0x566: 0x2000,
+	0x56b: 0x2000,
+	// Block 0x16, offset 0x580
+	0x593: 0x2000, 0x594: 0x2000,
+	0x59b: 0x2000, 0x59c: 0x2000, 0x59d: 0x2000,
+	0x59e: 0x2000, 0x5a0: 0x2000, 0x5a1: 0x2000, 0x5a2: 0x2000, 0x5a3: 0x2000,
+	0x5a4: 0x2000, 0x5a5: 0x2000, 0x5a6: 0x2000, 0x5a7: 0x2000, 0x5a8: 0x2000, 0x5a9: 0x2000,
+	0x5aa: 0x2000, 0x5ab: 0x2000,
+	0x5b0: 0x2000, 0x5b1: 0x2000, 0x5b2: 0x2000, 0x5b3: 0x2000, 0x5b4: 0x2000, 0x5b5: 0x2000,
+	0x5b6: 0x2000, 0x5b7: 0x2000, 0x5b8: 0x2000, 0x5b9: 0x2000,
+	// Block 0x17, offset 0x5c0
+	0x5c9: 0x2000,
+	0x5d0: 0x200a, 0x5d1: 0x200b,
+	0x5d2: 0x200a, 0x5d3: 0x200c, 0x5d4: 0x2000, 0x5d5: 0x2000, 0x5d6: 0x2000, 0x5d7: 0x2000,
+	0x5d8: 0x2000, 0x5d9: 0x2000,
+	0x5f8: 0x2000, 0x5f9: 0x2000,
+	// Block 0x18, offset 0x600
+	0x612: 0x2000, 0x614: 0x2000,
+	0x627: 0x2000,
+	// Block 0x19, offset 0x640
+	0x640: 0x2000, 0x642: 0x2000, 0x643: 0x2000,
+	0x647: 0x2000, 0x648: 0x2000, 0x64b: 0x2000,
+	0x64f: 0x2000, 0x651: 0x2000,
+	0x655: 0x2000,
+	0x65a: 0x2000, 0x65d: 0x2000,
+	0x65e: 0x2000, 0x65f: 0x2000, 0x660: 0x2000, 0x663: 0x2000,
+	0x665: 0x2000, 0x667: 0x2000, 0x668: 0x2000, 0x669: 0x2000,
+	0x66a: 0x2000, 0x66b: 0x2000, 0x66c: 0x2000, 0x66e: 0x2000,
+	0x674: 0x2000, 0x675: 0x2000,
+	0x676: 0x2000, 0x677: 0x2000,
+	0x67c: 0x2000, 0x67d: 0x2000,
+	// Block 0x1a, offset 0x680
+	0x688: 0x2000,
+	0x68c: 0x2000,
+	0x692: 0x2000,
+	0x6a0: 0x2000, 0x6a1: 0x2000,
+	0x6a4: 0x2000, 0x6a5: 0x2000, 0x6a6: 0x2000, 0x6a7: 0x2000,
+	0x6aa: 0x2000, 0x6ab: 0x2000, 0x6ae: 0x2000, 0x6af: 0x2000,
+	// Block 0x1b, offset 0x6c0
+	0x6c2: 0x2000, 0x6c3: 0x2000,
+	0x6c6: 0x2000, 0x6c7: 0x2000,
+	0x6d5: 0x2000,
+	0x6d9: 0x2000,
+	0x6e5: 0x2000,
+	0x6ff: 0x2000,
+	// Block 0x1c, offset 0x700
+	0x712: 0x2000,
+	0x71a: 0x4000, 0x71b: 0x4000,
+	0x729: 0x4000,
+	0x72a: 0x4000,
+	// Block 0x1d, offset 0x740
+	0x769: 0x4000,
+	0x76a: 0x4000, 0x76b: 0x4000, 0x76c: 0x4000,
+	0x770: 0x4000, 0x773: 0x4000,
+	// Block 0x1e, offset 0x780
+	0x7a0: 0x2000, 0x7a1: 0x2000, 0x7a2: 0x2000, 0x7a3: 0x2000,
+	0x7a4: 0x2000, 0x7a5: 0x2000, 0x7a6: 0x2000, 0x7a7: 0x2000, 0x7a8: 0x2000, 0x7a9: 0x2000,
+	0x7aa: 0x2000, 0x7ab: 0x2000, 0x7ac: 0x2000, 0x7ad: 0x2000, 0x7ae: 0x2000, 0x7af: 0x2000,
+	0x7b0: 0x2000, 0x7b1: 0x2000, 0x7b2: 0x2000, 0x7b3: 0x2000, 0x7b4: 0x2000, 0x7b5: 0x2000,
+	0x7b6: 0x2000, 0x7b7: 0x2000, 0x7b8: 0x2000, 0x7b9: 0x2000, 0x7ba: 0x2000, 0x7bb: 0x2000,
+	0x7bc: 0x2000, 0x7bd: 0x2000, 0x7be: 0x2000, 0x7bf: 0x2000,
+	// Block 0x1f, offset 0x7c0
+	0x7c0: 0x2000, 0x7c1: 0x2000, 0x7c2: 0x2000, 0x7c3: 0x2000, 0x7c4: 0x2000, 0x7c5: 0x2000,
+	0x7c6: 0x2000, 0x7c7: 0x2000, 0x7c8: 0x2000, 0x7c9: 0x2000, 0x7ca: 0x2000, 0x7cb: 0x2000,
+	0x7cc: 0x2000, 0x7cd: 0x2000, 0x7ce: 0x2000, 0x7cf: 0x2000, 0x7d0: 0x2000, 0x7d1: 0x2000,
+	0x7d2: 0x2000, 0x7d3: 0x2000, 0x7d4: 0x2000, 0x7d5: 0x2000, 0x7d6: 0x2000, 0x7d7: 0x2000,
+	0x7d8: 0x2000, 0x7d9: 0x2000, 0x7da: 0x2000, 0x7db: 0x2000, 0x7dc: 0x2000, 0x7dd: 0x2000,
+	0x7de: 0x2000, 0x7df: 0x2000, 0x7e0: 0x2000, 0x7e1: 0x2000, 0x7e2: 0x2000, 0x7e3: 0x2000,
+	0x7e4: 0x2000, 0x7e5: 0x2000, 0x7e6: 0x2000, 0x7e7: 0x2000, 0x7e8: 0x2000, 0x7e9: 0x2000,
+	0x7eb: 0x2000, 0x7ec: 0x2000, 0x7ed: 0x2000, 0x7ee: 0x2000, 0x7ef: 0x2000,
+	0x7f0: 0x2000, 0x7f1: 0x2000, 0x7f2: 0x2000, 0x7f3: 0x2000, 0x7f4: 0x2000, 0x7f5: 0x2000,
+	0x7f6: 0x2000, 0x7f7: 0x2000, 0x7f8: 0x2000, 0x7f9: 0x2000, 0x7fa: 0x2000, 0x7fb: 0x2000,
+	0x7fc: 0x2000, 0x7fd: 0x2000, 0x7fe: 0x2000, 0x7ff: 0x2000,
+	// Block 0x20, offset 0x800
+	0x800: 0x2000, 0x801: 0x2000, 0x802: 0x200d, 0x803: 0x2000, 0x804: 0x2000, 0x805: 0x2000,
+	0x806: 0x2000, 0x807: 0x2000, 0x808: 0x2000, 0x809: 0x2000, 0x80a: 0x2000, 0x80b: 0x2000,
+	0x80c: 0x2000, 0x80d: 0x2000, 0x80e: 0x2000, 0x80f: 0x2000, 0x810: 0x2000, 0x811: 0x2000,
+	0x812: 0x2000, 0x813: 0x2000, 0x814: 0x2000, 0x815: 0x2000, 0x816: 0x2000, 0x817: 0x2000,
+	0x818: 0x2000, 0x819: 0x2000, 0x81a: 0x2000, 0x81b: 0x2000, 0x81c: 0x2000, 0x81d: 0x2000,
+	0x81e: 0x2000, 0x81f: 0x2000, 0x820: 0x2000, 0x821: 0x2000, 0x822: 0x2000, 0x823: 0x2000,
+	0x824: 0x2000, 0x825: 0x2000, 0x826: 0x2000, 0x827: 0x2000, 0x828: 0x2000, 0x829: 0x2000,
+	0x82a: 0x2000, 0x82b: 0x2000, 0x82c: 0x2000, 0x82d: 0x2000, 0x82e: 0x2000, 0x82f: 0x2000,
+	0x830: 0x2000, 0x831: 0x2000, 0x832: 0x2000, 0x833: 0x2000, 0x834: 0x2000, 0x835: 0x2000,
+	0x836: 0x2000, 0x837: 0x2000, 0x838: 0x2000, 0x839: 0x2000, 0x83a: 0x2000, 0x83b: 0x2000,
+	0x83c: 0x2000, 0x83d: 0x2000, 0x83e: 0x2000, 0x83f: 0x2000,
+	// Block 0x21, offset 0x840
+	0x840: 0x2000, 0x841: 0x2000, 0x842: 0x2000, 0x843: 0x2000, 0x844: 0x2000, 0x845: 0x2000,
+	0x846: 0x2000, 0x847: 0x2000, 0x848: 0x2000, 0x849: 0x2000, 0x84a: 0x2000, 0x84b: 0x2000,
+	0x850: 0x2000, 0x851: 0x2000,
+	0x852: 0x2000, 0x853: 0x2000, 0x854: 0x2000, 0x855: 0x2000, 0x856: 0x2000, 0x857: 0x2000,
+	0x858: 0x2000, 0x859: 0x2000, 0x85a: 0x2000, 0x85b: 0x2000, 0x85c: 0x2000, 0x85d: 0x2000,
+	0x85e: 0x2000, 0x85f: 0x2000, 0x860: 0x2000, 0x861: 0x2000, 0x862: 0x2000, 0x863: 0x2000,
+	0x864: 0x2000, 0x865: 0x2000, 0x866: 0x2000, 0x867: 0x2000, 0x868: 0x2000, 0x869: 0x2000,
+	0x86a: 0x2000, 0x86b: 0x2000, 0x86c: 0x2000, 0x86d: 0x2000, 0x86e: 0x2000, 0x86f: 0x2000,
+	0x870: 0x2000, 0x871: 0x2000, 0x872: 0x2000, 0x873: 0x2000,
+	// Block 0x22, offset 0x880
+	0x880: 0x2000, 0x881: 0x2000, 0x882: 0x2000, 0x883: 0x2000, 0x884: 0x2000, 0x885: 0x2000,
+	0x886: 0x2000, 0x887: 0x2000, 0x888: 0x2000, 0x889: 0x2000, 0x88a: 0x2000, 0x88b: 0x2000,
+	0x88c: 0x2000, 0x88d: 0x2000, 0x88e: 0x2000, 0x88f: 0x2000,
+	0x892: 0x2000, 0x893: 0x2000, 0x894: 0x2000, 0x895: 0x2000,
+	0x8a0: 0x200e, 0x8a1: 0x2000, 0x8a3: 0x2000,
+	0x8a4: 0x2000, 0x8a5: 0x2000, 0x8a6: 0x2000, 0x8a7: 0x2000, 0x8a8: 0x2000, 0x8a9: 0x2000,
+	0x8b2: 0x2000, 0x8b3: 0x2000,
+	0x8b6: 0x2000, 0x8b7: 0x2000,
+	0x8bc: 0x2000, 0x8bd: 0x2000,
+	// Block 0x23, offset 0x8c0
+	0x8c0: 0x2000, 0x8c1: 0x2000,
+	0x8c6: 0x2000, 0x8c7: 0x2000, 0x8c8: 0x2000, 0x8cb: 0x200f,
+	0x8ce: 0x2000, 0x8cf: 0x2000, 0x8d0: 0x2000, 0x8d1: 0x2000,
+	0x8e2: 0x2000, 0x8e3: 0x2000,
+	0x8e4: 0x2000, 0x8e5: 0x2000,
+	0x8ef: 0x2000,
+	0x8fd: 0x4000, 0x8fe: 0x4000,
+	// Block 0x24, offset 0x900
+	0x905: 0x2000,
+	0x906: 0x2000, 0x909: 0x2000,
+	0x90e: 0x2000, 0x90f: 0x2000,
+	0x914: 0x4000, 0x915: 0x4000,
+	0x91c: 0x2000,
+	0x91e: 0x2000,
+	// Block 0x25, offset 0x940
+	0x940: 0x2000, 0x942: 0x2000,
+	0x948: 0x4000, 0x949: 0x4000, 0x94a: 0x4000, 0x94b: 0x4000,
+	0x94c: 0x4000, 0x94d: 0x4000, 0x94e: 0x4000, 0x94f: 0x4000, 0x950: 0x4000, 0x951: 0x4000,
+	0x952: 0x4000, 0x953: 0x4000,
+	0x960: 0x2000, 0x961: 0x2000, 0x963: 0x2000,
+	0x964: 0x2000, 0x965: 0x2000, 0x967: 0x2000, 0x968: 0x2000, 0x969: 0x2000,
+	0x96a: 0x2000, 0x96c: 0x2000, 0x96d: 0x2000, 0x96f: 0x2000,
+	0x97f: 0x4000,
+	// Block 0x26, offset 0x980
+	0x993: 0x4000,
+	0x99e: 0x2000, 0x99f: 0x2000, 0x9a1: 0x4000,
+	0x9aa: 0x4000, 0x9ab: 0x4000,
+	0x9bd: 0x4000, 0x9be: 0x4000, 0x9bf: 0x2000,
+	// Block 0x27, offset 0x9c0
+	0x9c4: 0x4000, 0x9c5: 0x4000,
+	0x9c6: 0x2000, 0x9c7: 0x2000, 0x9c8: 0x2000, 0x9c9: 0x2000, 0x9ca: 0x2000, 0x9cb: 0x2000,
+	0x9cc: 0x2000, 0x9cd: 0x2000, 0x9ce: 0x4000, 0x9cf: 0x2000, 0x9d0: 0x2000, 0x9d1: 0x2000,
+	0x9d2: 0x2000, 0x9d3: 0x2000, 0x9d4: 0x4000, 0x9d5: 0x2000, 0x9d6: 0x2000, 0x9d7: 0x2000,
+	0x9d8: 0x2000, 0x9d9: 0x2000, 0x9da: 0x2000, 0x9db: 0x2000, 0x9dc: 0x2000, 0x9dd: 0x2000,
+	0x9de: 0x2000, 0x9df: 0x2000, 0x9e0: 0x2000, 0x9e1: 0x2000, 0x9e3: 0x2000,
+	0x9e8: 0x2000, 0x9e9: 0x2000,
+	0x9ea: 0x4000, 0x9eb: 0x2000, 0x9ec: 0x2000, 0x9ed: 0x2000, 0x9ee: 0x2000, 0x9ef: 0x2000,
+	0x9f0: 0x2000, 0x9f1: 0x2000, 0x9f2: 0x4000, 0x9f3: 0x4000, 0x9f4: 0x2000, 0x9f5: 0x4000,
+	0x9f6: 0x2000, 0x9f7: 0x2000, 0x9f8: 0x2000, 0x9f9: 0x2000, 0x9fa: 0x4000, 0x9fb: 0x2000,
+	0x9fc: 0x2000, 0x9fd: 0x4000, 0x9fe: 0x2000, 0x9ff: 0x2000,
+	// Block 0x28, offset 0xa00
+	0xa05: 0x4000,
+	0xa0a: 0x4000, 0xa0b: 0x4000,
+	0xa28: 0x4000,
+	0xa3d: 0x2000,
+	// Block 0x29, offset 0xa40
+	0xa4c: 0x4000, 0xa4e: 0x4000,
+	0xa53: 0x4000, 0xa54: 0x4000, 0xa55: 0x4000, 0xa57: 0x4000,
+	0xa76: 0x2000, 0xa77: 0x2000, 0xa78: 0x2000, 0xa79: 0x2000, 0xa7a: 0x2000, 0xa7b: 0x2000,
+	0xa7c: 0x2000, 0xa7d: 0x2000, 0xa7e: 0x2000, 0xa7f: 0x2000,
+	// Block 0x2a, offset 0xa80
+	0xa95: 0x4000, 0xa96: 0x4000, 0xa97: 0x4000,
+	0xab0: 0x4000,
+	0xabf: 0x4000,
+	// Block 0x2b, offset 0xac0
+	0xae6: 0x6000, 0xae7: 0x6000, 0xae8: 0x6000, 0xae9: 0x6000,
+	0xaea: 0x6000, 0xaeb: 0x6000, 0xaec: 0x6000, 0xaed: 0x6000,
+	// Block 0x2c, offset 0xb00
+	0xb05: 0x6010,
+	0xb06: 0x6011,
+	// Block 0x2d, offset 0xb40
+	0xb5b: 0x4000, 0xb5c: 0x4000,
+	// Block 0x2e, offset 0xb80
+	0xb90: 0x4000,
+	0xb95: 0x4000, 0xb96: 0x2000, 0xb97: 0x2000,
+	0xb98: 0x2000, 0xb99: 0x2000,
+	// Block 0x2f, offset 0xbc0
+	0xbc0: 0x4000, 0xbc1: 0x4000, 0xbc2: 0x4000, 0xbc3: 0x4000, 0xbc4: 0x4000, 0xbc5: 0x4000,
+	0xbc6: 0x4000, 0xbc7: 0x4000, 0xbc8: 0x4000, 0xbc9: 0x4000, 0xbca: 0x4000, 0xbcb: 0x4000,
+	0xbcc: 0x4000, 0xbcd: 0x4000, 0xbce: 0x4000, 0xbcf: 0x4000, 0xbd0: 0x4000, 0xbd1: 0x4000,
+	0xbd2: 0x4000, 0xbd3: 0x4000, 0xbd4: 0x4000, 0xbd5: 0x4000, 0xbd6: 0x4000, 0xbd7: 0x4000,
+	0xbd8: 0x4000, 0xbd9: 0x4000, 0xbdb: 0x4000, 0xbdc: 0x4000, 0xbdd: 0x4000,
+	0xbde: 0x4000, 0xbdf: 0x4000, 0xbe0: 0x4000, 0xbe1: 0x4000, 0xbe2: 0x4000, 0xbe3: 0x4000,
+	0xbe4: 0x4000, 0xbe5: 0x4000, 0xbe6: 0x4000, 0xbe7: 0x4000, 0xbe8: 0x4000, 0xbe9: 0x4000,
+	0xbea: 0x4000, 0xbeb: 0x4000, 0xbec: 0x4000, 0xbed: 0x4000, 0xbee: 0x4000, 0xbef: 0x4000,
+	0xbf0: 0x4000, 0xbf1: 0x4000, 0xbf2: 0x4000, 0xbf3: 0x4000, 0xbf4: 0x4000, 0xbf5: 0x4000,
+	0xbf6: 0x4000, 0xbf7: 0x4000, 0xbf8: 0x4000, 0xbf9: 0x4000, 0xbfa: 0x4000, 0xbfb: 0x4000,
+	0xbfc: 0x4000, 0xbfd: 0x4000, 0xbfe: 0x4000, 0xbff: 0x4000,
+	// Block 0x30, offset 0xc00
+	0xc00: 0x4000, 0xc01: 0x4000, 0xc02: 0x4000, 0xc03: 0x4000, 0xc04: 0x4000, 0xc05: 0x4000,
+	0xc06: 0x4000, 0xc07: 0x4000, 0xc08: 0x4000, 0xc09: 0x4000, 0xc0a: 0x4000, 0xc0b: 0x4000,
+	0xc0c: 0x4000, 0xc0d: 0x4000, 0xc0e: 0x4000, 0xc0f: 0x4000, 0xc10: 0x4000, 0xc11: 0x4000,
+	0xc12: 0x4000, 0xc13: 0x4000, 0xc14: 0x4000, 0xc15: 0x4000, 0xc16: 0x4000, 0xc17: 0x4000,
+	0xc18: 0x4000, 0xc19: 0x4000, 0xc1a: 0x4000, 0xc1b: 0x4000, 0xc1c: 0x4000, 0xc1d: 0x4000,
+	0xc1e: 0x4000, 0xc1f: 0x4000, 0xc20: 0x4000, 0xc21: 0x4000, 0xc22: 0x4000, 0xc23: 0x4000,
+	0xc24: 0x4000, 0xc25: 0x4000, 0xc26: 0x4000, 0xc27: 0x4000, 0xc28: 0x4000, 0xc29: 0x4000,
+	0xc2a: 0x4000, 0xc2b: 0x4000, 0xc2c: 0x4000, 0xc2d: 0x4000, 0xc2e: 0x4000, 0xc2f: 0x4000,
+	0xc30: 0x4000, 0xc31: 0x4000, 0xc32: 0x4000, 0xc33: 0x4000,
+	// Block 0x31, offset 0xc40
+	0xc40: 0x4000, 0xc41: 0x4000, 0xc42: 0x4000, 0xc43: 0x4000, 0xc44: 0x4000, 0xc45: 0x4000,
+	0xc46: 0x4000, 0xc47: 0x4000, 0xc48: 0x4000, 0xc49: 0x4000, 0xc4a: 0x4000, 0xc4b: 0x4000,
+	0xc4c: 0x4000, 0xc4d: 0x4000, 0xc4e: 0x4000, 0xc4f: 0x4000, 0xc50: 0x4000, 0xc51: 0x4000,
+	0xc52: 0x4000, 0xc53: 0x4000, 0xc54: 0x4000, 0xc55: 0x4000,
+	0xc70: 0x4000, 0xc71: 0x4000, 0xc72: 0x4000, 0xc73: 0x4000, 0xc74: 0x4000, 0xc75: 0x4000,
+	0xc76: 0x4000, 0xc77: 0x4000, 0xc78: 0x4000, 0xc79: 0x4000, 0xc7a: 0x4000, 0xc7b: 0x4000,
+	// Block 0x32, offset 0xc80
+	0xc80: 0x9012, 0xc81: 0x4013, 0xc82: 0x4014, 0xc83: 0x4000, 0xc84: 0x4000, 0xc85: 0x4000,
+	0xc86: 0x4000, 0xc87: 0x4000, 0xc88: 0x4000, 0xc89: 0x4000, 0xc8a: 0x4000, 0xc8b: 0x4000,
+	0xc8c: 0x4015, 0xc8d: 0x4015, 0xc8e: 0x4000, 0xc8f: 0x4000, 0xc90: 0x4000, 0xc91: 0x4000,
+	0xc92: 0x4000, 0xc93: 0x4000, 0xc94: 0x4000, 0xc95: 0x4000, 0xc96: 0x4000, 0xc97: 0x4000,
+	0xc98: 0x4000, 0xc99: 0x4000, 0xc9a: 0x4000, 0xc9b: 0x4000, 0xc9c: 0x4000, 0xc9d: 0x4000,
+	0xc9e: 0x4000, 0xc9f: 0x4000, 0xca0: 0x4000, 0xca1: 0x4000, 0xca2: 0x4000, 0xca3: 0x4000,
+	0xca4: 0x4000, 0xca5: 0x4000, 0xca6: 0x4000, 0xca7: 0x4000, 0xca8: 0x4000, 0xca9: 0x4000,
+	0xcaa: 0x4000, 0xcab: 0x4000, 0xcac: 0x4000, 0xcad: 0x4000, 0xcae: 0x4000, 0xcaf: 0x4000,
+	0xcb0: 0x4000, 0xcb1: 0x4000, 0xcb2: 0x4000, 0xcb3: 0x4000, 0xcb4: 0x4000, 0xcb5: 0x4000,
+	0xcb6: 0x4000, 0xcb7: 0x4000, 0xcb8: 0x4000, 0xcb9: 0x4000, 0xcba: 0x4000, 0xcbb: 0x4000,
+	0xcbc: 0x4000, 0xcbd: 0x4000, 0xcbe: 0x4000,
+	// Block 0x33, offset 0xcc0
+	0xcc1: 0x4000, 0xcc2: 0x4000, 0xcc3: 0x4000, 0xcc4: 0x4000, 0xcc5: 0x4000,
+	0xcc6: 0x4000, 0xcc7: 0x4000, 0xcc8: 0x4000, 0xcc9: 0x4000, 0xcca: 0x4000, 0xccb: 0x4000,
+	0xccc: 0x4000, 0xccd: 0x4000, 0xcce: 0x4000, 0xccf: 0x4000, 0xcd0: 0x4000, 0xcd1: 0x4000,
+	0xcd2: 0x4000, 0xcd3: 0x4000, 0xcd4: 0x4000, 0xcd5: 0x4000, 0xcd6: 0x4000, 0xcd7: 0x4000,
+	0xcd8: 0x4000, 0xcd9: 0x4000, 0xcda: 0x4000, 0xcdb: 0x4000, 0xcdc: 0x4000, 0xcdd: 0x4000,
+	0xcde: 0x4000, 0xcdf: 0x4000, 0xce0: 0x4000, 0xce1: 0x4000, 0xce2: 0x4000, 0xce3: 0x4000,
+	0xce4: 0x4000, 0xce5: 0x4000, 0xce6: 0x4000, 0xce7: 0x4000, 0xce8: 0x4000, 0xce9: 0x4000,
+	0xcea: 0x4000, 0xceb: 0x4000, 0xcec: 0x4000, 0xced: 0x4000, 0xcee: 0x4000, 0xcef: 0x4000,
+	0xcf0: 0x4000, 0xcf1: 0x4000, 0xcf2: 0x4000, 0xcf3: 0x4000, 0xcf4: 0x4000, 0xcf5: 0x4000,
+	0xcf6: 0x4000, 0xcf7: 0x4000, 0xcf8: 0x4000, 0xcf9: 0x4000, 0xcfa: 0x4000, 0xcfb: 0x4000,
+	0xcfc: 0x4000, 0xcfd: 0x4000, 0xcfe: 0x4000, 0xcff: 0x4000,
+	// Block 0x34, offset 0xd00
+	0xd00: 0x4000, 0xd01: 0x4000, 0xd02: 0x4000, 0xd03: 0x4000, 0xd04: 0x4000, 0xd05: 0x4000,
+	0xd06: 0x4000, 0xd07: 0x4000, 0xd08: 0x4000, 0xd09: 0x4000, 0xd0a: 0x4000, 0xd0b: 0x4000,
+	0xd0c: 0x4000, 0xd0d: 0x4000, 0xd0e: 0x4000, 0xd0f: 0x4000, 0xd10: 0x4000, 0xd11: 0x4000,
+	0xd12: 0x4000, 0xd13: 0x4000, 0xd14: 0x4000, 0xd15: 0x4000, 0xd16: 0x4000,
+	0xd19: 0x4016, 0xd1a: 0x4017, 0xd1b: 0x4000, 0xd1c: 0x4000, 0xd1d: 0x4000,
+	0xd1e: 0x4000, 0xd1f: 0x4000, 0xd20: 0x4000, 0xd21: 0x4018, 0xd22: 0x4019, 0xd23: 0x401a,
+	0xd24: 0x401b, 0xd25: 0x401c, 0xd26: 0x401d, 0xd27: 0x401e, 0xd28: 0x401f, 0xd29: 0x4020,
+	0xd2a: 0x4021, 0xd2b: 0x4022, 0xd2c: 0x4000, 0xd2d: 0x4010, 0xd2e: 0x4000, 0xd2f: 0x4023,
+	0xd30: 0x4000, 0xd31: 0x4024, 0xd32: 0x4000, 0xd33: 0x4025, 0xd34: 0x4000, 0xd35: 0x4026,
+	0xd36: 0x4000, 0xd37: 0x401a, 0xd38: 0x4000, 0xd39: 0x4027, 0xd3a: 0x4000, 0xd3b: 0x4028,
+	0xd3c: 0x4000, 0xd3d: 0x4020, 0xd3e: 0x4000, 0xd3f: 0x4029,
+	// Block 0x35, offset 0xd40
+	0xd40: 0x4000, 0xd41: 0x402a, 0xd42: 0x4000, 0xd43: 0x402b, 0xd44: 0x402c, 0xd45: 0x4000,
+	0xd46: 0x4017, 0xd47: 0x4000, 0xd48: 0x402d, 0xd49: 0x4000, 0xd4a: 0x402e, 0xd4b: 0x402f,
+	0xd4c: 0x4030, 0xd4d: 0x4017, 0xd4e: 0x4016, 0xd4f: 0x4017, 0xd50: 0x4000, 0xd51: 0x4000,
+	0xd52: 0x4031, 0xd53: 0x4000, 0xd54: 0x4000, 0xd55: 0x4031, 0xd56: 0x4000, 0xd57: 0x4000,
+	0xd58: 0x4032, 0xd59: 0x4000, 0xd5a: 0x4000, 0xd5b: 0x4032, 0xd5c: 0x4000, 0xd5d: 0x4000,
+	0xd5e: 0x4033, 0xd5f: 0x402e, 0xd60: 0x4034, 0xd61: 0x4035, 0xd62: 0x4034, 0xd63: 0x4036,
+	0xd64: 0x4037, 0xd65: 0x4024, 0xd66: 0x4035, 0xd67: 0x4025, 0xd68: 0x4038, 0xd69: 0x4038,
+	0xd6a: 0x4039, 0xd6b: 0x4039, 0xd6c: 0x403a, 0xd6d: 0x403a, 0xd6e: 0x4000, 0xd6f: 0x4035,
+	0xd70: 0x4000, 0xd71: 0x4000, 0xd72: 0x403b, 0xd73: 0x403c, 0xd74: 0x4000, 0xd75: 0x4000,
+	0xd76: 0x4000, 0xd77: 0x4000, 0xd78: 0x4000, 0xd79: 0x4000, 0xd7a: 0x4000, 0xd7b: 0x403d,
+	0xd7c: 0x401c, 0xd7d: 0x4000, 0xd7e: 0x4000, 0xd7f: 0x4000,
+	// Block 0x36, offset 0xd80
+	0xd85: 0x4000,
+	0xd86: 0x4000, 0xd87: 0x4000, 0xd88: 0x4000, 0xd89: 0x4000, 0xd8a: 0x4000, 0xd8b: 0x4000,
+	0xd8c: 0x4000, 0xd8d: 0x4000, 0xd8e: 0x4000, 0xd8f: 0x4000, 0xd90: 0x4000, 0xd91: 0x4000,
+	0xd92: 0x4000, 0xd93: 0x4000, 0xd94: 0x4000, 0xd95: 0x4000, 0xd96: 0x4000, 0xd97: 0x4000,
+	0xd98: 0x4000, 0xd99: 0x4000, 0xd9a: 0x4000, 0xd9b: 0x4000, 0xd9c: 0x4000, 0xd9d: 0x4000,
+	0xd9e: 0x4000, 0xd9f: 0x4000, 0xda0: 0x4000, 0xda1: 0x4000, 0xda2: 0x4000, 0xda3: 0x4000,
+	0xda4: 0x4000, 0xda5: 0x4000, 0xda6: 0x4000, 0xda7: 0x4000, 0xda8: 0x4000, 0xda9: 0x4000,
+	0xdaa: 0x4000, 0xdab: 0x4000, 0xdac: 0x4000, 0xdad: 0x4000, 0xdae: 0x4000, 0xdaf: 0x4000,
+	0xdb1: 0x403e, 0xdb2: 0x403e, 0xdb3: 0x403e, 0xdb4: 0x403e, 0xdb5: 0x403e,
+	0xdb6: 0x403e, 0xdb7: 0x403e, 0xdb8: 0x403e, 0xdb9: 0x403e, 0xdba: 0x403e, 0xdbb: 0x403e,
+	0xdbc: 0x403e, 0xdbd: 0x403e, 0xdbe: 0x403e, 0xdbf: 0x403e,
+	// Block 0x37, offset 0xdc0
+	0xdc0: 0x4037, 0xdc1: 0x4037, 0xdc2: 0x4037, 0xdc3: 0x4037, 0xdc4: 0x4037, 0xdc5: 0x4037,
+	0xdc6: 0x4037, 0xdc7: 0x4037, 0xdc8: 0x4037, 0xdc9: 0x4037, 0xdca: 0x4037, 0xdcb: 0x4037,
+	0xdcc: 0x4037, 0xdcd: 0x4037, 0xdce: 0x4037, 0xdcf: 0x400e, 0xdd0: 0x403f, 0xdd1: 0x4040,
+	0xdd2: 0x4041, 0xdd3: 0x4040, 0xdd4: 0x403f, 0xdd5: 0x4042, 0xdd6: 0x4043, 0xdd7: 0x4044,
+	0xdd8: 0x4040, 0xdd9: 0x4041, 0xdda: 0x4040, 0xddb: 0x4045, 0xddc: 0x4009, 0xddd: 0x4045,
+	0xdde: 0x4046, 0xddf: 0x4045, 0xde0: 0x4047, 0xde1: 0x400b, 0xde2: 0x400a, 0xde3: 0x400c,
+	0xde4: 0x4048, 0xde5: 0x4000, 0xde6: 0x4000, 0xde7: 0x4000, 0xde8: 0x4000, 0xde9: 0x4000,
+	0xdea: 0x4000, 0xdeb: 0x4000, 0xdec: 0x4000, 0xded: 0x4000, 0xdee: 0x4000, 0xdef: 0x4000,
+	0xdf0: 0x4000, 0xdf1: 0x4000, 0xdf2: 0x4000, 0xdf3: 0x4000, 0xdf4: 0x4000, 0xdf5: 0x4000,
+	0xdf6: 0x4000, 0xdf7: 0x4000, 0xdf8: 0x4000, 0xdf9: 0x4000, 0xdfa: 0x4000, 0xdfb: 0x4000,
+	0xdfc: 0x4000, 0xdfd: 0x4000, 0xdfe: 0x4000, 0xdff: 0x4000,
+	// Block 0x38, offset 0xe00
+	0xe00: 0x4000, 0xe01: 0x4000, 0xe02: 0x4000, 0xe03: 0x4000, 0xe04: 0x4000, 0xe05: 0x4000,
+	0xe06: 0x4000, 0xe07: 0x4000, 0xe08: 0x4000, 0xe09: 0x4000, 0xe0a: 0x4000, 0xe0b: 0x4000,
+	0xe0c: 0x4000, 0xe0d: 0x4000, 0xe0e: 0x4000, 0xe10: 0x4000, 0xe11: 0x4000,
+	0xe12: 0x4000, 0xe13: 0x4000, 0xe14: 0x4000, 0xe15: 0x4000, 0xe16: 0x4000, 0xe17: 0x4000,
+	0xe18: 0x4000, 0xe19: 0x4000, 0xe1a: 0x4000, 0xe1b: 0x4000, 0xe1c: 0x4000, 0xe1d: 0x4000,
+	0xe1e: 0x4000, 0xe1f: 0x4000, 0xe20: 0x4000, 0xe21: 0x4000, 0xe22: 0x4000, 0xe23: 0x4000,
+	0xe24: 0x4000, 0xe25: 0x4000, 0xe26: 0x4000, 0xe27: 0x4000, 0xe28: 0x4000, 0xe29: 0x4000,
+	0xe2a: 0x4000, 0xe2b: 0x4000, 0xe2c: 0x4000, 0xe2d: 0x4000, 0xe2e: 0x4000, 0xe2f: 0x4000,
+	0xe30: 0x4000, 0xe31: 0x4000, 0xe32: 0x4000, 0xe33: 0x4000, 0xe34: 0x4000, 0xe35: 0x4000,
+	0xe36: 0x4000, 0xe37: 0x4000, 0xe38: 0x4000, 0xe39: 0x4000, 0xe3a: 0x4000,
+	// Block 0x39, offset 0xe40
+	0xe40: 0x4000, 0xe41: 0x4000, 0xe42: 0x4000, 0xe43: 0x4000, 0xe44: 0x4000, 0xe45: 0x4000,
+	0xe46: 0x4000, 0xe47: 0x4000, 0xe48: 0x4000, 0xe49: 0x4000, 0xe4a: 0x4000, 0xe4b: 0x4000,
+	0xe4c: 0x4000, 0xe4d: 0x4000, 0xe4e: 0x4000, 0xe4f: 0x4000, 0xe50: 0x4000, 0xe51: 0x4000,
+	0xe52: 0x4000, 0xe53: 0x4000, 0xe54: 0x4000, 0xe55: 0x4000, 0xe56: 0x4000, 0xe57: 0x4000,
+	0xe58: 0x4000, 0xe59: 0x4000, 0xe5a: 0x4000, 0xe5b: 0x4000, 0xe5c: 0x4000, 0xe5d: 0x4000,
+	0xe5e: 0x4000, 0xe5f: 0x4000, 0xe60: 0x4000, 0xe61: 0x4000, 0xe62: 0x4000, 0xe63: 0x4000,
+	0xe70: 0x4000, 0xe71: 0x4000, 0xe72: 0x4000, 0xe73: 0x4000, 0xe74: 0x4000, 0xe75: 0x4000,
+	0xe76: 0x4000, 0xe77: 0x4000, 0xe78: 0x4000, 0xe79: 0x4000, 0xe7a: 0x4000, 0xe7b: 0x4000,
+	0xe7c: 0x4000, 0xe7d: 0x4000, 0xe7e: 0x4000, 0xe7f: 0x4000,
+	// Block 0x3a, offset 0xe80
+	0xe80: 0x4000, 0xe81: 0x4000, 0xe82: 0x4000, 0xe83: 0x4000, 0xe84: 0x4000, 0xe85: 0x4000,
+	0xe86: 0x4000, 0xe87: 0x4000, 0xe88: 0x4000, 0xe89: 0x4000, 0xe8a: 0x4000, 0xe8b: 0x4000,
+	0xe8c: 0x4000, 0xe8d: 0x4000, 0xe8e: 0x4000, 0xe8f: 0x4000, 0xe90: 0x4000, 0xe91: 0x4000,
+	0xe92: 0x4000, 0xe93: 0x4000, 0xe94: 0x4000, 0xe95: 0x4000, 0xe96: 0x4000, 0xe97: 0x4000,
+	0xe98: 0x4000, 0xe99: 0x4000, 0xe9a: 0x4000, 0xe9b: 0x4000, 0xe9c: 0x4000, 0xe9d: 0x4000,
+	0xe9e: 0x4000, 0xea0: 0x4000, 0xea1: 0x4000, 0xea2: 0x4000, 0xea3: 0x4000,
+	0xea4: 0x4000, 0xea5: 0x4000, 0xea6: 0x4000, 0xea7: 0x4000, 0xea8: 0x4000, 0xea9: 0x4000,
+	0xeaa: 0x4000, 0xeab: 0x4000, 0xeac: 0x4000, 0xead: 0x4000, 0xeae: 0x4000, 0xeaf: 0x4000,
+	0xeb0: 0x4000, 0xeb1: 0x4000, 0xeb2: 0x4000, 0xeb3: 0x4000, 0xeb4: 0x4000, 0xeb5: 0x4000,
+	0xeb6: 0x4000, 0xeb7: 0x4000, 0xeb8: 0x4000, 0xeb9: 0x4000, 0xeba: 0x4000, 0xebb: 0x4000,
+	0xebc: 0x4000, 0xebd: 0x4000, 0xebe: 0x4000, 0xebf: 0x4000,
+	// Block 0x3b, offset 0xec0
+	0xec0: 0x4000, 0xec1: 0x4000, 0xec2: 0x4000, 0xec3: 0x4000, 0xec4: 0x4000, 0xec5: 0x4000,
+	0xec6: 0x4000, 0xec7: 0x4000, 0xec8: 0x2000, 0xec9: 0x2000, 0xeca: 0x2000, 0xecb: 0x2000,
+	0xecc: 0x2000, 0xecd: 0x2000, 0xece: 0x2000, 0xecf: 0x2000, 0xed0: 0x4000, 0xed1: 0x4000,
+	0xed2: 0x4000, 0xed3: 0x4000, 0xed4: 0x4000, 0xed5: 0x4000, 0xed6: 0x4000, 0xed7: 0x4000,
+	0xed8: 0x4000, 0xed9: 0x4000, 0xeda: 0x4000, 0xedb: 0x4000, 0xedc: 0x4000, 0xedd: 0x4000,
+	0xede: 0x4000, 0xedf: 0x4000, 0xee0: 0x4000, 0xee1: 0x4000, 0xee2: 0x4000, 0xee3: 0x4000,
+	0xee4: 0x4000, 0xee5: 0x4000, 0xee6: 0x4000, 0xee7: 0x4000, 0xee8: 0x4000, 0xee9: 0x4000,
+	0xeea: 0x4000, 0xeeb: 0x4000, 0xeec: 0x4000, 0xeed: 0x4000, 0xeee: 0x4000, 0xeef: 0x4000,
+	0xef0: 0x4000, 0xef1: 0x4000, 0xef2: 0x4000, 0xef3: 0x4000, 0xef4: 0x4000, 0xef5: 0x4000,
+	0xef6: 0x4000, 0xef7: 0x4000, 0xef8: 0x4000, 0xef9: 0x4000, 0xefa: 0x4000, 0xefb: 0x4000,
+	0xefc: 0x4000, 0xefd: 0x4000, 0xefe: 0x4000, 0xeff: 0x4000,
+	// Block 0x3c, offset 0xf00
+	0xf00: 0x4000, 0xf01: 0x4000, 0xf02: 0x4000, 0xf03: 0x4000, 0xf04: 0x4000, 0xf05: 0x4000,
+	0xf06: 0x4000, 0xf07: 0x4000, 0xf08: 0x4000, 0xf09: 0x4000, 0xf0a: 0x4000, 0xf0b: 0x4000,
+	0xf0c: 0x4000, 0xf0d: 0x4000, 0xf0e: 0x4000, 0xf0f: 0x4000, 0xf10: 0x4000, 0xf11: 0x4000,
+	0xf12: 0x4000, 0xf13: 0x4000, 0xf14: 0x4000, 0xf15: 0x4000, 0xf16: 0x4000, 0xf17: 0x4000,
+	0xf18: 0x4000, 0xf19: 0x4000, 0xf1a: 0x4000, 0xf1b: 0x4000, 0xf1c: 0x4000, 0xf1d: 0x4000,
+	0xf1e: 0x4000, 0xf1f: 0x4000, 0xf20: 0x4000, 0xf21: 0x4000, 0xf22: 0x4000, 0xf23: 0x4000,
+	0xf24: 0x4000, 0xf25: 0x4000, 0xf26: 0x4000, 0xf27: 0x4000, 0xf28: 0x4000, 0xf29: 0x4000,
+	0xf2a: 0x4000, 0xf2b: 0x4000, 0xf2c: 0x4000, 0xf2d: 0x4000, 0xf2e: 0x4000, 0xf2f: 0x4000,
+	0xf30: 0x4000, 0xf31: 0x4000, 0xf32: 0x4000, 0xf33: 0x4000, 0xf34: 0x4000, 0xf35: 0x4000,
+	0xf36: 0x4000, 0xf37: 0x4000, 0xf38: 0x4000, 0xf39: 0x4000, 0xf3a: 0x4000, 0xf3b: 0x4000,
+	0xf3c: 0x4000, 0xf3d: 0x4000, 0xf3e: 0x4000,
+	// Block 0x3d, offset 0xf40
+	0xf40: 0x4000, 0xf41: 0x4000, 0xf42: 0x4000, 0xf43: 0x4000, 0xf44: 0x4000, 0xf45: 0x4000,
+	0xf46: 0x4000, 0xf47: 0x4000, 0xf48: 0x4000, 0xf49: 0x4000, 0xf4a: 0x4000, 0xf4b: 0x4000,
+	0xf4c: 0x4000, 0xf50: 0x4000, 0xf51: 0x4000,
+	0xf52: 0x4000, 0xf53: 0x4000, 0xf54: 0x4000, 0xf55: 0x4000, 0xf56: 0x4000, 0xf57: 0x4000,
+	0xf58: 0x4000, 0xf59: 0x4000, 0xf5a: 0x4000, 0xf5b: 0x4000, 0xf5c: 0x4000, 0xf5d: 0x4000,
+	0xf5e: 0x4000, 0xf5f: 0x4000, 0xf60: 0x4000, 0xf61: 0x4000, 0xf62: 0x4000, 0xf63: 0x4000,
+	0xf64: 0x4000, 0xf65: 0x4000, 0xf66: 0x4000, 0xf67: 0x4000, 0xf68: 0x4000, 0xf69: 0x4000,
+	0xf6a: 0x4000, 0xf6b: 0x4000, 0xf6c: 0x4000, 0xf6d: 0x4000, 0xf6e: 0x4000, 0xf6f: 0x4000,
+	0xf70: 0x4000, 0xf71: 0x4000, 0xf72: 0x4000, 0xf73: 0x4000, 0xf74: 0x4000, 0xf75: 0x4000,
+	0xf76: 0x4000, 0xf77: 0x4000, 0xf78: 0x4000, 0xf79: 0x4000, 0xf7a: 0x4000, 0xf7b: 0x4000,
+	0xf7c: 0x4000, 0xf7d: 0x4000, 0xf7e: 0x4000, 0xf7f: 0x4000,
+	// Block 0x3e, offset 0xf80
+	0xf80: 0x4000, 0xf81: 0x4000, 0xf82: 0x4000, 0xf83: 0x4000, 0xf84: 0x4000, 0xf85: 0x4000,
+	0xf86: 0x4000,
+	// Block 0x3f, offset 0xfc0
+	0xfe0: 0x4000, 0xfe1: 0x4000, 0xfe2: 0x4000, 0xfe3: 0x4000,
+	0xfe4: 0x4000, 0xfe5: 0x4000, 0xfe6: 0x4000, 0xfe7: 0x4000, 0xfe8: 0x4000, 0xfe9: 0x4000,
+	0xfea: 0x4000, 0xfeb: 0x4000, 0xfec: 0x4000, 0xfed: 0x4000, 0xfee: 0x4000, 0xfef: 0x4000,
+	0xff0: 0x4000, 0xff1: 0x4000, 0xff2: 0x4000, 0xff3: 0x4000, 0xff4: 0x4000, 0xff5: 0x4000,
+	0xff6: 0x4000, 0xff7: 0x4000, 0xff8: 0x4000, 0xff9: 0x4000, 0xffa: 0x4000, 0xffb: 0x4000,
+	0xffc: 0x4000,
+	// Block 0x40, offset 0x1000
+	0x1000: 0x4000, 0x1001: 0x4000, 0x1002: 0x4000, 0x1003: 0x4000, 0x1004: 0x4000, 0x1005: 0x4000,
+	0x1006: 0x4000, 0x1007: 0x4000, 0x1008: 0x4000, 0x1009: 0x4000, 0x100a: 0x4000, 0x100b: 0x4000,
+	0x100c: 0x4000, 0x100d: 0x4000, 0x100e: 0x4000, 0x100f: 0x4000, 0x1010: 0x4000, 0x1011: 0x4000,
+	0x1012: 0x4000, 0x1013: 0x4000, 0x1014: 0x4000, 0x1015: 0x4000, 0x1016: 0x4000, 0x1017: 0x4000,
+	0x1018: 0x4000, 0x1019: 0x4000, 0x101a: 0x4000, 0x101b: 0x4000, 0x101c: 0x4000, 0x101d: 0x4000,
+	0x101e: 0x4000, 0x101f: 0x4000, 0x1020: 0x4000, 0x1021: 0x4000, 0x1022: 0x4000, 0x1023: 0x4000,
+	// Block 0x41, offset 0x1040
+	0x1040: 0x2000, 0x1041: 0x2000, 0x1042: 0x2000, 0x1043: 0x2000, 0x1044: 0x2000, 0x1045: 0x2000,
+	0x1046: 0x2000, 0x1047: 0x2000, 0x1048: 0x2000, 0x1049: 0x2000, 0x104a: 0x2000, 0x104b: 0x2000,
+	0x104c: 0x2000, 0x104d: 0x2000, 0x104e: 0x2000, 0x104f: 0x2000, 0x1050: 0x4000, 0x1051: 0x4000,
+	0x1052: 0x4000, 0x1053: 0x4000, 0x1054: 0x4000, 0x1055: 0x4000, 0x1056: 0x4000, 0x1057: 0x4000,
+	0x1058: 0x4000, 0x1059: 0x4000,
+	0x1070: 0x4000, 0x1071: 0x4000, 0x1072: 0x4000, 0x1073: 0x4000, 0x1074: 0x4000, 0x1075: 0x4000,
+	0x1076: 0x4000, 0x1077: 0x4000, 0x1078: 0x4000, 0x1079: 0x4000, 0x107a: 0x4000, 0x107b: 0x4000,
+	0x107c: 0x4000, 0x107d: 0x4000, 0x107e: 0x4000, 0x107f: 0x4000,
+	// Block 0x42, offset 0x1080
+	0x1080: 0x4000, 0x1081: 0x4000, 0x1082: 0x4000, 0x1083: 0x4000, 0x1084: 0x4000, 0x1085: 0x4000,
+	0x1086: 0x4000, 0x1087: 0x4000, 0x1088: 0x4000, 0x1089: 0x4000, 0x108a: 0x4000, 0x108b: 0x4000,
+	0x108c: 0x4000, 0x108d: 0x4000, 0x108e: 0x4000, 0x108f: 0x4000, 0x1090: 0x4000, 0x1091: 0x4000,
+	0x1092: 0x4000, 0x1094: 0x4000, 0x1095: 0x4000, 0x1096: 0x4000, 0x1097: 0x4000,
+	0x1098: 0x4000, 0x1099: 0x4000, 0x109a: 0x4000, 0x109b: 0x4000, 0x109c: 0x4000, 0x109d: 0x4000,
+	0x109e: 0x4000, 0x109f: 0x4000, 0x10a0: 0x4000, 0x10a1: 0x4000, 0x10a2: 0x4000, 0x10a3: 0x4000,
+	0x10a4: 0x4000, 0x10a5: 0x4000, 0x10a6: 0x4000, 0x10a8: 0x4000, 0x10a9: 0x4000,
+	0x10aa: 0x4000, 0x10ab: 0x4000,
+	// Block 0x43, offset 0x10c0
+	0x10c1: 0x9012, 0x10c2: 0x9012, 0x10c3: 0x9012, 0x10c4: 0x9012, 0x10c5: 0x9012,
+	0x10c6: 0x9012, 0x10c7: 0x9012, 0x10c8: 0x9012, 0x10c9: 0x9012, 0x10ca: 0x9012, 0x10cb: 0x9012,
+	0x10cc: 0x9012, 0x10cd: 0x9012, 0x10ce: 0x9012, 0x10cf: 0x9012, 0x10d0: 0x9012, 0x10d1: 0x9012,
+	0x10d2: 0x9012, 0x10d3: 0x9012, 0x10d4: 0x9012, 0x10d5: 0x9012, 0x10d6: 0x9012, 0x10d7: 0x9012,
+	0x10d8: 0x9012, 0x10d9: 0x9012, 0x10da: 0x9012, 0x10db: 0x9012, 0x10dc: 0x9012, 0x10dd: 0x9012,
+	0x10de: 0x9012, 0x10df: 0x9012, 0x10e0: 0x9049, 0x10e1: 0x9049, 0x10e2: 0x9049, 0x10e3: 0x9049,
+	0x10e4: 0x9049, 0x10e5: 0x9049, 0x10e6: 0x9049, 0x10e7: 0x9049, 0x10e8: 0x9049, 0x10e9: 0x9049,
+	0x10ea: 0x9049, 0x10eb: 0x9049, 0x10ec: 0x9049, 0x10ed: 0x9049, 0x10ee: 0x9049, 0x10ef: 0x9049,
+	0x10f0: 0x9049, 0x10f1: 0x9049, 0x10f2: 0x9049, 0x10f3: 0x9049, 0x10f4: 0x9049, 0x10f5: 0x9049,
+	0x10f6: 0x9049, 0x10f7: 0x9049, 0x10f8: 0x9049, 0x10f9: 0x9049, 0x10fa: 0x9049, 0x10fb: 0x9049,
+	0x10fc: 0x9049, 0x10fd: 0x9049, 0x10fe: 0x9049, 0x10ff: 0x9049,
+	// Block 0x44, offset 0x1100
+	0x1100: 0x9049, 0x1101: 0x9049, 0x1102: 0x9049, 0x1103: 0x9049, 0x1104: 0x9049, 0x1105: 0x9049,
+	0x1106: 0x9049, 0x1107: 0x9049, 0x1108: 0x9049, 0x1109: 0x9049, 0x110a: 0x9049, 0x110b: 0x9049,
+	0x110c: 0x9049, 0x110d: 0x9049, 0x110e: 0x9049, 0x110f: 0x9049, 0x1110: 0x9049, 0x1111: 0x9049,
+	0x1112: 0x9049, 0x1113: 0x9049, 0x1114: 0x9049, 0x1115: 0x9049, 0x1116: 0x9049, 0x1117: 0x9049,
+	0x1118: 0x9049, 0x1119: 0x9049, 0x111a: 0x9049, 0x111b: 0x9049, 0x111c: 0x9049, 0x111d: 0x9049,
+	0x111e: 0x9049, 0x111f: 0x904a, 0x1120: 0x904b, 0x1121: 0xb04c, 0x1122: 0xb04d, 0x1123: 0xb04d,
+	0x1124: 0xb04e, 0x1125: 0xb04f, 0x1126: 0xb050, 0x1127: 0xb051, 0x1128: 0xb052, 0x1129: 0xb053,
+	0x112a: 0xb054, 0x112b: 0xb055, 0x112c: 0xb056, 0x112d: 0xb057, 0x112e: 0xb058, 0x112f: 0xb059,
+	0x1130: 0xb05a, 0x1131: 0xb05b, 0x1132: 0xb05c, 0x1133: 0xb05d, 0x1134: 0xb05e, 0x1135: 0xb05f,
+	0x1136: 0xb060, 0x1137: 0xb061, 0x1138: 0xb062, 0x1139: 0xb063, 0x113a: 0xb064, 0x113b: 0xb065,
+	0x113c: 0xb052, 0x113d: 0xb066, 0x113e: 0xb067, 0x113f: 0xb055,
+	// Block 0x45, offset 0x1140
+	0x1140: 0xb068, 0x1141: 0xb069, 0x1142: 0xb06a, 0x1143: 0xb06b, 0x1144: 0xb05a, 0x1145: 0xb056,
+	0x1146: 0xb06c, 0x1147: 0xb06d, 0x1148: 0xb06b, 0x1149: 0xb06e, 0x114a: 0xb06b, 0x114b: 0xb06f,
+	0x114c: 0xb06f, 0x114d: 0xb070, 0x114e: 0xb070, 0x114f: 0xb071, 0x1150: 0xb056, 0x1151: 0xb072,
+	0x1152: 0xb073, 0x1153: 0xb072, 0x1154: 0xb074, 0x1155: 0xb073, 0x1156: 0xb075, 0x1157: 0xb075,
+	0x1158: 0xb076, 0x1159: 0xb076, 0x115a: 0xb077, 0x115b: 0xb077, 0x115c: 0xb073, 0x115d: 0xb078,
+	0x115e: 0xb079, 0x115f: 0xb067, 0x1160: 0xb07a, 0x1161: 0xb07b, 0x1162: 0xb07b, 0x1163: 0xb07b,
+	0x1164: 0xb07b, 0x1165: 0xb07b, 0x1166: 0xb07b, 0x1167: 0xb07b, 0x1168: 0xb07b, 0x1169: 0xb07b,
+	0x116a: 0xb07b, 0x116b: 0xb07b, 0x116c: 0xb07b, 0x116d: 0xb07b, 0x116e: 0xb07b, 0x116f: 0xb07b,
+	0x1170: 0xb07c, 0x1171: 0xb07c, 0x1172: 0xb07c, 0x1173: 0xb07c, 0x1174: 0xb07c, 0x1175: 0xb07c,
+	0x1176: 0xb07c, 0x1177: 0xb07c, 0x1178: 0xb07c, 0x1179: 0xb07c, 0x117a: 0xb07c, 0x117b: 0xb07c,
+	0x117c: 0xb07c, 0x117d: 0xb07c, 0x117e: 0xb07c,
+	// Block 0x46, offset 0x1180
+	0x1182: 0xb07d, 0x1183: 0xb07e, 0x1184: 0xb07f, 0x1185: 0xb080,
+	0x1186: 0xb07f, 0x1187: 0xb07e, 0x118a: 0xb081, 0x118b: 0xb082,
+	0x118c: 0xb083, 0x118d: 0xb07f, 0x118e: 0xb080, 0x118f: 0xb07f,
+	0x1192: 0xb084, 0x1193: 0xb085, 0x1194: 0xb084, 0x1195: 0xb086, 0x1196: 0xb084, 0x1197: 0xb087,
+	0x119a: 0xb088, 0x119b: 0xb089, 0x119c: 0xb08a,
+	0x11a0: 0x908b, 0x11a1: 0x908b, 0x11a2: 0x908c, 0x11a3: 0x908d,
+	0x11a4: 0x908b, 0x11a5: 0x908e, 0x11a6: 0x908f, 0x11a8: 0xb090, 0x11a9: 0xb091,
+	0x11aa: 0xb092, 0x11ab: 0xb091, 0x11ac: 0xb093, 0x11ad: 0xb094, 0x11ae: 0xb095,
+	0x11bd: 0x2000,
+	// Block 0x47, offset 0x11c0
+	0x11e0: 0x4000, 0x11e1: 0x4000,
+	// Block 0x48, offset 0x1200
+	0x1200: 0x4000, 0x1201: 0x4000, 0x1202: 0x4000, 0x1203: 0x4000, 0x1204: 0x4000, 0x1205: 0x4000,
+	0x1206: 0x4000, 0x1207: 0x4000, 0x1208: 0x4000, 0x1209: 0x4000, 0x120a: 0x4000, 0x120b: 0x4000,
+	0x120c: 0x4000, 0x120d: 0x4000, 0x120e: 0x4000, 0x120f: 0x4000, 0x1210: 0x4000, 0x1211: 0x4000,
+	0x1212: 0x4000, 0x1213: 0x4000, 0x1214: 0x4000, 0x1215: 0x4000, 0x1216: 0x4000, 0x1217: 0x4000,
+	0x1218: 0x4000, 0x1219: 0x4000, 0x121a: 0x4000, 0x121b: 0x4000, 0x121c: 0x4000, 0x121d: 0x4000,
+	0x121e: 0x4000, 0x121f: 0x4000, 0x1220: 0x4000, 0x1221: 0x4000, 0x1222: 0x4000, 0x1223: 0x4000,
+	0x1224: 0x4000, 0x1225: 0x4000, 0x1226: 0x4000, 0x1227: 0x4000, 0x1228: 0x4000, 0x1229: 0x4000,
+	0x122a: 0x4000, 0x122b: 0x4000, 0x122c: 0x4000, 0x122d: 0x4000, 0x122e: 0x4000, 0x122f: 0x4000,
+	0x1230: 0x4000, 0x1231: 0x4000,
+	// Block 0x49, offset 0x1240
+	0x1240: 0x4000, 0x1241: 0x4000, 0x1242: 0x4000, 0x1243: 0x4000, 0x1244: 0x4000, 0x1245: 0x4000,
+	0x1246: 0x4000, 0x1247: 0x4000, 0x1248: 0x4000, 0x1249: 0x4000, 0x124a: 0x4000, 0x124b: 0x4000,
+	0x124c: 0x4000, 0x124d: 0x4000, 0x124e: 0x4000, 0x124f: 0x4000, 0x1250: 0x4000, 0x1251: 0x4000,
+	0x1252: 0x4000, 0x1253: 0x4000, 0x1254: 0x4000, 0x1255: 0x4000, 0x1256: 0x4000, 0x1257: 0x4000,
+	0x1258: 0x4000, 0x1259: 0x4000, 0x125a: 0x4000, 0x125b: 0x4000, 0x125c: 0x4000, 0x125d: 0x4000,
+	0x125e: 0x4000, 0x125f: 0x4000, 0x1260: 0x4000, 0x1261: 0x4000, 0x1262: 0x4000, 0x1263: 0x4000,
+	0x1264: 0x4000, 0x1265: 0x4000, 0x1266: 0x4000, 0x1267: 0x4000, 0x1268: 0x4000, 0x1269: 0x4000,
+	0x126a: 0x4000, 0x126b: 0x4000, 0x126c: 0x4000, 0x126d: 0x4000, 0x126e: 0x4000, 0x126f: 0x4000,
+	0x1270: 0x4000, 0x1271: 0x4000, 0x1272: 0x4000,
+	// Block 0x4a, offset 0x1280
+	0x1280: 0x4000, 0x1281: 0x4000, 0x1282: 0x4000, 0x1283: 0x4000, 0x1284: 0x4000, 0x1285: 0x4000,
+	0x1286: 0x4000, 0x1287: 0x4000, 0x1288: 0x4000, 0x1289: 0x4000, 0x128a: 0x4000, 0x128b: 0x4000,
+	0x128c: 0x4000, 0x128d: 0x4000, 0x128e: 0x4000, 0x128f: 0x4000, 0x1290: 0x4000, 0x1291: 0x4000,
+	0x1292: 0x4000, 0x1293: 0x4000, 0x1294: 0x4000, 0x1295: 0x4000, 0x1296: 0x4000, 0x1297: 0x4000,
+	0x1298: 0x4000, 0x1299: 0x4000, 0x129a: 0x4000, 0x129b: 0x4000, 0x129c: 0x4000, 0x129d: 0x4000,
+	0x129e: 0x4000,
+	// Block 0x4b, offset 0x12c0
+	0x12f0: 0x4000, 0x12f1: 0x4000, 0x12f2: 0x4000, 0x12f3: 0x4000, 0x12f4: 0x4000, 0x12f5: 0x4000,
+	0x12f6: 0x4000, 0x12f7: 0x4000, 0x12f8: 0x4000, 0x12f9: 0x4000, 0x12fa: 0x4000, 0x12fb: 0x4000,
+	0x12fc: 0x4000, 0x12fd: 0x4000, 0x12fe: 0x4000, 0x12ff: 0x4000,
+	// Block 0x4c, offset 0x1300
+	0x1300: 0x4000, 0x1301: 0x4000, 0x1302: 0x4000, 0x1303: 0x4000, 0x1304: 0x4000, 0x1305: 0x4000,
+	0x1306: 0x4000, 0x1307: 0x4000, 0x1308: 0x4000, 0x1309: 0x4000, 0x130a: 0x4000, 0x130b: 0x4000,
+	0x130c: 0x4000, 0x130d: 0x4000, 0x130e: 0x4000, 0x130f: 0x4000, 0x1310: 0x4000, 0x1311: 0x4000,
+	0x1312: 0x4000, 0x1313: 0x4000, 0x1314: 0x4000, 0x1315: 0x4000, 0x1316: 0x4000, 0x1317: 0x4000,
+	0x1318: 0x4000, 0x1319: 0x4000, 0x131a: 0x4000, 0x131b: 0x4000, 0x131c: 0x4000, 0x131d: 0x4000,
+	0x131e: 0x4000, 0x131f: 0x4000, 0x1320: 0x4000, 0x1321: 0x4000, 0x1322: 0x4000, 0x1323: 0x4000,
+	0x1324: 0x4000, 0x1325: 0x4000, 0x1326: 0x4000, 0x1327: 0x4000, 0x1328: 0x4000, 0x1329: 0x4000,
+	0x132a: 0x4000, 0x132b: 0x4000, 0x132c: 0x4000, 0x132d: 0x4000, 0x132e: 0x4000, 0x132f: 0x4000,
+	0x1330: 0x4000, 0x1331: 0x4000, 0x1332: 0x4000, 0x1333: 0x4000, 0x1334: 0x4000, 0x1335: 0x4000,
+	0x1336: 0x4000, 0x1337: 0x4000, 0x1338: 0x4000, 0x1339: 0x4000, 0x133a: 0x4000, 0x133b: 0x4000,
+	// Block 0x4d, offset 0x1340
+	0x1344: 0x4000,
+	// Block 0x4e, offset 0x1380
+	0x138f: 0x4000,
+	// Block 0x4f, offset 0x13c0
+	0x13c0: 0x2000, 0x13c1: 0x2000, 0x13c2: 0x2000, 0x13c3: 0x2000, 0x13c4: 0x2000, 0x13c5: 0x2000,
+	0x13c6: 0x2000, 0x13c7: 0x2000, 0x13c8: 0x2000, 0x13c9: 0x2000, 0x13ca: 0x2000,
+	0x13d0: 0x2000, 0x13d1: 0x2000,
+	0x13d2: 0x2000, 0x13d3: 0x2000, 0x13d4: 0x2000, 0x13d5: 0x2000, 0x13d6: 0x2000, 0x13d7: 0x2000,
+	0x13d8: 0x2000, 0x13d9: 0x2000, 0x13da: 0x2000, 0x13db: 0x2000, 0x13dc: 0x2000, 0x13dd: 0x2000,
+	0x13de: 0x2000, 0x13df: 0x2000, 0x13e0: 0x2000, 0x13e1: 0x2000, 0x13e2: 0x2000, 0x13e3: 0x2000,
+	0x13e4: 0x2000, 0x13e5: 0x2000, 0x13e6: 0x2000, 0x13e7: 0x2000, 0x13e8: 0x2000, 0x13e9: 0x2000,
+	0x13ea: 0x2000, 0x13eb: 0x2000, 0x13ec: 0x2000, 0x13ed: 0x2000,
+	0x13f0: 0x2000, 0x13f1: 0x2000, 0x13f2: 0x2000, 0x13f3: 0x2000, 0x13f4: 0x2000, 0x13f5: 0x2000,
+	0x13f6: 0x2000, 0x13f7: 0x2000, 0x13f8: 0x2000, 0x13f9: 0x2000, 0x13fa: 0x2000, 0x13fb: 0x2000,
+	0x13fc: 0x2000, 0x13fd: 0x2000, 0x13fe: 0x2000, 0x13ff: 0x2000,
+	// Block 0x50, offset 0x1400
+	0x1400: 0x2000, 0x1401: 0x2000, 0x1402: 0x2000, 0x1403: 0x2000, 0x1404: 0x2000, 0x1405: 0x2000,
+	0x1406: 0x2000, 0x1407: 0x2000, 0x1408: 0x2000, 0x1409: 0x2000, 0x140a: 0x2000, 0x140b: 0x2000,
+	0x140c: 0x2000, 0x140d: 0x2000, 0x140e: 0x2000, 0x140f: 0x2000, 0x1410: 0x2000, 0x1411: 0x2000,
+	0x1412: 0x2000, 0x1413: 0x2000, 0x1414: 0x2000, 0x1415: 0x2000, 0x1416: 0x2000, 0x1417: 0x2000,
+	0x1418: 0x2000, 0x1419: 0x2000, 0x141a: 0x2000, 0x141b: 0x2000, 0x141c: 0x2000, 0x141d: 0x2000,
+	0x141e: 0x2000, 0x141f: 0x2000, 0x1420: 0x2000, 0x1421: 0x2000, 0x1422: 0x2000, 0x1423: 0x2000,
+	0x1424: 0x2000, 0x1425: 0x2000, 0x1426: 0x2000, 0x1427: 0x2000, 0x1428: 0x2000, 0x1429: 0x2000,
+	0x1430: 0x2000, 0x1431: 0x2000, 0x1432: 0x2000, 0x1433: 0x2000, 0x1434: 0x2000, 0x1435: 0x2000,
+	0x1436: 0x2000, 0x1437: 0x2000, 0x1438: 0x2000, 0x1439: 0x2000, 0x143a: 0x2000, 0x143b: 0x2000,
+	0x143c: 0x2000, 0x143d: 0x2000, 0x143e: 0x2000, 0x143f: 0x2000,
+	// Block 0x51, offset 0x1440
+	0x1440: 0x2000, 0x1441: 0x2000, 0x1442: 0x2000, 0x1443: 0x2000, 0x1444: 0x2000, 0x1445: 0x2000,
+	0x1446: 0x2000, 0x1447: 0x2000, 0x1448: 0x2000, 0x1449: 0x2000, 0x144a: 0x2000, 0x144b: 0x2000,
+	0x144c: 0x2000, 0x144d: 0x2000, 0x144e: 0x4000, 0x144f: 0x2000, 0x1450: 0x2000, 0x1451: 0x4000,
+	0x1452: 0x4000, 0x1453: 0x4000, 0x1454: 0x4000, 0x1455: 0x4000, 0x1456: 0x4000, 0x1457: 0x4000,
+	0x1458: 0x4000, 0x1459: 0x4000, 0x145a: 0x4000, 0x145b: 0x2000, 0x145c: 0x2000, 0x145d: 0x2000,
+	0x145e: 0x2000, 0x145f: 0x2000, 0x1460: 0x2000, 0x1461: 0x2000, 0x1462: 0x2000, 0x1463: 0x2000,
+	0x1464: 0x2000, 0x1465: 0x2000, 0x1466: 0x2000, 0x1467: 0x2000, 0x1468: 0x2000, 0x1469: 0x2000,
+	0x146a: 0x2000, 0x146b: 0x2000, 0x146c: 0x2000,
+	// Block 0x52, offset 0x1480
+	0x1480: 0x4000, 0x1481: 0x4000, 0x1482: 0x4000,
+	0x1490: 0x4000, 0x1491: 0x4000,
+	0x1492: 0x4000, 0x1493: 0x4000, 0x1494: 0x4000, 0x1495: 0x4000, 0x1496: 0x4000, 0x1497: 0x4000,
+	0x1498: 0x4000, 0x1499: 0x4000, 0x149a: 0x4000, 0x149b: 0x4000, 0x149c: 0x4000, 0x149d: 0x4000,
+	0x149e: 0x4000, 0x149f: 0x4000, 0x14a0: 0x4000, 0x14a1: 0x4000, 0x14a2: 0x4000, 0x14a3: 0x4000,
+	0x14a4: 0x4000, 0x14a5: 0x4000, 0x14a6: 0x4000, 0x14a7: 0x4000, 0x14a8: 0x4000, 0x14a9: 0x4000,
+	0x14aa: 0x4000, 0x14ab: 0x4000, 0x14ac: 0x4000, 0x14ad: 0x4000, 0x14ae: 0x4000, 0x14af: 0x4000,
+	0x14b0: 0x4000, 0x14b1: 0x4000, 0x14b2: 0x4000, 0x14b3: 0x4000, 0x14b4: 0x4000, 0x14b5: 0x4000,
+	0x14b6: 0x4000, 0x14b7: 0x4000, 0x14b8: 0x4000, 0x14b9: 0x4000, 0x14ba: 0x4000, 0x14bb: 0x4000,
+	// Block 0x53, offset 0x14c0
+	0x14c0: 0x4000, 0x14c1: 0x4000, 0x14c2: 0x4000, 0x14c3: 0x4000, 0x14c4: 0x4000, 0x14c5: 0x4000,
+	0x14c6: 0x4000, 0x14c7: 0x4000, 0x14c8: 0x4000,
+	0x14d0: 0x4000, 0x14d1: 0x4000,
+	0x14e0: 0x4000, 0x14e1: 0x4000, 0x14e2: 0x4000, 0x14e3: 0x4000,
+	0x14e4: 0x4000, 0x14e5: 0x4000,
+	// Block 0x54, offset 0x1500
+	0x1500: 0x4000, 0x1501: 0x4000, 0x1502: 0x4000, 0x1503: 0x4000, 0x1504: 0x4000, 0x1505: 0x4000,
+	0x1506: 0x4000, 0x1507: 0x4000, 0x1508: 0x4000, 0x1509: 0x4000, 0x150a: 0x4000, 0x150b: 0x4000,
+	0x150c: 0x4000, 0x150d: 0x4000, 0x150e: 0x4000, 0x150f: 0x4000, 0x1510: 0x4000, 0x1511: 0x4000,
+	0x1512: 0x4000, 0x1513: 0x4000, 0x1514: 0x4000, 0x1515: 0x4000, 0x1516: 0x4000, 0x1517: 0x4000,
+	0x1518: 0x4000, 0x1519: 0x4000, 0x151a: 0x4000, 0x151b: 0x4000, 0x151c: 0x4000, 0x151d: 0x4000,
+	0x151e: 0x4000, 0x151f: 0x4000, 0x1520: 0x4000,
+	0x152d: 0x4000, 0x152e: 0x4000, 0x152f: 0x4000,
+	0x1530: 0x4000, 0x1531: 0x4000, 0x1532: 0x4000, 0x1533: 0x4000, 0x1534: 0x4000, 0x1535: 0x4000,
+	0x1537: 0x4000, 0x1538: 0x4000, 0x1539: 0x4000, 0x153a: 0x4000, 0x153b: 0x4000,
+	0x153c: 0x4000, 0x153d: 0x4000, 0x153e: 0x4000, 0x153f: 0x4000,
+	// Block 0x55, offset 0x1540
+	0x1540: 0x4000, 0x1541: 0x4000, 0x1542: 0x4000, 0x1543: 0x4000, 0x1544: 0x4000, 0x1545: 0x4000,
+	0x1546: 0x4000, 0x1547: 0x4000, 0x1548: 0x4000, 0x1549: 0x4000, 0x154a: 0x4000, 0x154b: 0x4000,
+	0x154c: 0x4000, 0x154d: 0x4000, 0x154e: 0x4000, 0x154f: 0x4000, 0x1550: 0x4000, 0x1551: 0x4000,
+	0x1552: 0x4000, 0x1553: 0x4000, 0x1554: 0x4000, 0x1555: 0x4000, 0x1556: 0x4000, 0x1557: 0x4000,
+	0x1558: 0x4000, 0x1559: 0x4000, 0x155a: 0x4000, 0x155b: 0x4000, 0x155c: 0x4000, 0x155d: 0x4000,
+	0x155e: 0x4000, 0x155f: 0x4000, 0x1560: 0x4000, 0x1561: 0x4000, 0x1562: 0x4000, 0x1563: 0x4000,
+	0x1564: 0x4000, 0x1565: 0x4000, 0x1566: 0x4000, 0x1567: 0x4000, 0x1568: 0x4000, 0x1569: 0x4000,
+	0x156a: 0x4000, 0x156b: 0x4000, 0x156c: 0x4000, 0x156d: 0x4000, 0x156e: 0x4000, 0x156f: 0x4000,
+	0x1570: 0x4000, 0x1571: 0x4000, 0x1572: 0x4000, 0x1573: 0x4000, 0x1574: 0x4000, 0x1575: 0x4000,
+	0x1576: 0x4000, 0x1577: 0x4000, 0x1578: 0x4000, 0x1579: 0x4000, 0x157a: 0x4000, 0x157b: 0x4000,
+	0x157c: 0x4000, 0x157e: 0x4000, 0x157f: 0x4000,
+	// Block 0x56, offset 0x1580
+	0x1580: 0x4000, 0x1581: 0x4000, 0x1582: 0x4000, 0x1583: 0x4000, 0x1584: 0x4000, 0x1585: 0x4000,
+	0x1586: 0x4000, 0x1587: 0x4000, 0x1588: 0x4000, 0x1589: 0x4000, 0x158a: 0x4000, 0x158b: 0x4000,
+	0x158c: 0x4000, 0x158d: 0x4000, 0x158e: 0x4000, 0x158f: 0x4000, 0x1590: 0x4000, 0x1591: 0x4000,
+	0x1592: 0x4000, 0x1593: 0x4000,
+	0x15a0: 0x4000, 0x15a1: 0x4000, 0x15a2: 0x4000, 0x15a3: 0x4000,
+	0x15a4: 0x4000, 0x15a5: 0x4000, 0x15a6: 0x4000, 0x15a7: 0x4000, 0x15a8: 0x4000, 0x15a9: 0x4000,
+	0x15aa: 0x4000, 0x15ab: 0x4000, 0x15ac: 0x4000, 0x15ad: 0x4000, 0x15ae: 0x4000, 0x15af: 0x4000,
+	0x15b0: 0x4000, 0x15b1: 0x4000, 0x15b2: 0x4000, 0x15b3: 0x4000, 0x15b4: 0x4000, 0x15b5: 0x4000,
+	0x15b6: 0x4000, 0x15b7: 0x4000, 0x15b8: 0x4000, 0x15b9: 0x4000, 0x15ba: 0x4000, 0x15bb: 0x4000,
+	0x15bc: 0x4000, 0x15bd: 0x4000, 0x15be: 0x4000, 0x15bf: 0x4000,
+	// Block 0x57, offset 0x15c0
+	0x15c0: 0x4000, 0x15c1: 0x4000, 0x15c2: 0x4000, 0x15c3: 0x4000, 0x15c4: 0x4000, 0x15c5: 0x4000,
+	0x15c6: 0x4000, 0x15c7: 0x4000, 0x15c8: 0x4000, 0x15c9: 0x4000, 0x15ca: 0x4000,
+	0x15cf: 0x4000, 0x15d0: 0x4000, 0x15d1: 0x4000,
+	0x15d2: 0x4000, 0x15d3: 0x4000,
+	0x15e0: 0x4000, 0x15e1: 0x4000, 0x15e2: 0x4000, 0x15e3: 0x4000,
+	0x15e4: 0x4000, 0x15e5: 0x4000, 0x15e6: 0x4000, 0x15e7: 0x4000, 0x15e8: 0x4000, 0x15e9: 0x4000,
+	0x15ea: 0x4000, 0x15eb: 0x4000, 0x15ec: 0x4000, 0x15ed: 0x4000, 0x15ee: 0x4000, 0x15ef: 0x4000,
+	0x15f0: 0x4000, 0x15f4: 0x4000,
+	0x15f8: 0x4000, 0x15f9: 0x4000, 0x15fa: 0x4000, 0x15fb: 0x4000,
+	0x15fc: 0x4000, 0x15fd: 0x4000, 0x15fe: 0x4000, 0x15ff: 0x4000,
+	// Block 0x58, offset 0x1600
+	0x1600: 0x4000, 0x1602: 0x4000, 0x1603: 0x4000, 0x1604: 0x4000, 0x1605: 0x4000,
+	0x1606: 0x4000, 0x1607: 0x4000, 0x1608: 0x4000, 0x1609: 0x4000, 0x160a: 0x4000, 0x160b: 0x4000,
+	0x160c: 0x4000, 0x160d: 0x4000, 0x160e: 0x4000, 0x160f: 0x4000, 0x1610: 0x4000, 0x1611: 0x4000,
+	0x1612: 0x4000, 0x1613: 0x4000, 0x1614: 0x4000, 0x1615: 0x4000, 0x1616: 0x4000, 0x1617: 0x4000,
+	0x1618: 0x4000, 0x1619: 0x4000, 0x161a: 0x4000, 0x161b: 0x4000, 0x161c: 0x4000, 0x161d: 0x4000,
+	0x161e: 0x4000, 0x161f: 0x4000, 0x1620: 0x4000, 0x1621: 0x4000, 0x1622: 0x4000, 0x1623: 0x4000,
+	0x1624: 0x4000, 0x1625: 0x4000, 0x1626: 0x4000, 0x1627: 0x4000, 0x1628: 0x4000, 0x1629: 0x4000,
+	0x162a: 0x4000, 0x162b: 0x4000, 0x162c: 0x4000, 0x162d: 0x4000, 0x162e: 0x4000, 0x162f: 0x4000,
+	0x1630: 0x4000, 0x1631: 0x4000, 0x1632: 0x4000, 0x1633: 0x4000, 0x1634: 0x4000, 0x1635: 0x4000,
+	0x1636: 0x4000, 0x1637: 0x4000, 0x1638: 0x4000, 0x1639: 0x4000, 0x163a: 0x4000, 0x163b: 0x4000,
+	0x163c: 0x4000, 0x163d: 0x4000, 0x163e: 0x4000, 0x163f: 0x4000,
+	// Block 0x59, offset 0x1640
+	0x1640: 0x4000, 0x1641: 0x4000, 0x1642: 0x4000, 0x1643: 0x4000, 0x1644: 0x4000, 0x1645: 0x4000,
+	0x1646: 0x4000, 0x1647: 0x4000, 0x1648: 0x4000, 0x1649: 0x4000, 0x164a: 0x4000, 0x164b: 0x4000,
+	0x164c: 0x4000, 0x164d: 0x4000, 0x164e: 0x4000, 0x164f: 0x4000, 0x1650: 0x4000, 0x1651: 0x4000,
+	0x1652: 0x4000, 0x1653: 0x4000, 0x1654: 0x4000, 0x1655: 0x4000, 0x1656: 0x4000, 0x1657: 0x4000,
+	0x1658: 0x4000, 0x1659: 0x4000, 0x165a: 0x4000, 0x165b: 0x4000, 0x165c: 0x4000, 0x165d: 0x4000,
+	0x165e: 0x4000, 0x165f: 0x4000, 0x1660: 0x4000, 0x1661: 0x4000, 0x1662: 0x4000, 0x1663: 0x4000,
+	0x1664: 0x4000, 0x1665: 0x4000, 0x1666: 0x4000, 0x1667: 0x4000, 0x1668: 0x4000, 0x1669: 0x4000,
+	0x166a: 0x4000, 0x166b: 0x4000, 0x166c: 0x4000, 0x166d: 0x4000, 0x166e: 0x4000, 0x166f: 0x4000,
+	0x1670: 0x4000, 0x1671: 0x4000, 0x1672: 0x4000, 0x1673: 0x4000, 0x1674: 0x4000, 0x1675: 0x4000,
+	0x1676: 0x4000, 0x1677: 0x4000, 0x1678: 0x4000, 0x1679: 0x4000, 0x167a: 0x4000, 0x167b: 0x4000,
+	0x167c: 0x4000, 0x167f: 0x4000,
+	// Block 0x5a, offset 0x1680
+	0x1680: 0x4000, 0x1681: 0x4000, 0x1682: 0x4000, 0x1683: 0x4000, 0x1684: 0x4000, 0x1685: 0x4000,
+	0x1686: 0x4000, 0x1687: 0x4000, 0x1688: 0x4000, 0x1689: 0x4000, 0x168a: 0x4000, 0x168b: 0x4000,
+	0x168c: 0x4000, 0x168d: 0x4000, 0x168e: 0x4000, 0x168f: 0x4000, 0x1690: 0x4000, 0x1691: 0x4000,
+	0x1692: 0x4000, 0x1693: 0x4000, 0x1694: 0x4000, 0x1695: 0x4000, 0x1696: 0x4000, 0x1697: 0x4000,
+	0x1698: 0x4000, 0x1699: 0x4000, 0x169a: 0x4000, 0x169b: 0x4000, 0x169c: 0x4000, 0x169d: 0x4000,
+	0x169e: 0x4000, 0x169f: 0x4000, 0x16a0: 0x4000, 0x16a1: 0x4000, 0x16a2: 0x4000, 0x16a3: 0x4000,
+	0x16a4: 0x4000, 0x16a5: 0x4000, 0x16a6: 0x4000, 0x16a7: 0x4000, 0x16a8: 0x4000, 0x16a9: 0x4000,
+	0x16aa: 0x4000, 0x16ab: 0x4000, 0x16ac: 0x4000, 0x16ad: 0x4000, 0x16ae: 0x4000, 0x16af: 0x4000,
+	0x16b0: 0x4000, 0x16b1: 0x4000, 0x16b2: 0x4000, 0x16b3: 0x4000, 0x16b4: 0x4000, 0x16b5: 0x4000,
+	0x16b6: 0x4000, 0x16b7: 0x4000, 0x16b8: 0x4000, 0x16b9: 0x4000, 0x16ba: 0x4000, 0x16bb: 0x4000,
+	0x16bc: 0x4000, 0x16bd: 0x4000,
+	// Block 0x5b, offset 0x16c0
+	0x16cb: 0x4000,
+	0x16cc: 0x4000, 0x16cd: 0x4000, 0x16ce: 0x4000, 0x16d0: 0x4000, 0x16d1: 0x4000,
+	0x16d2: 0x4000, 0x16d3: 0x4000, 0x16d4: 0x4000, 0x16d5: 0x4000, 0x16d6: 0x4000, 0x16d7: 0x4000,
+	0x16d8: 0x4000, 0x16d9: 0x4000, 0x16da: 0x4000, 0x16db: 0x4000, 0x16dc: 0x4000, 0x16dd: 0x4000,
+	0x16de: 0x4000, 0x16df: 0x4000, 0x16e0: 0x4000, 0x16e1: 0x4000, 0x16e2: 0x4000, 0x16e3: 0x4000,
+	0x16e4: 0x4000, 0x16e5: 0x4000, 0x16e6: 0x4000, 0x16e7: 0x4000,
+	0x16fa: 0x4000,
+	// Block 0x5c, offset 0x1700
+	0x1715: 0x4000, 0x1716: 0x4000,
+	0x1724: 0x4000,
+	// Block 0x5d, offset 0x1740
+	0x177b: 0x4000,
+	0x177c: 0x4000, 0x177d: 0x4000, 0x177e: 0x4000, 0x177f: 0x4000,
+	// Block 0x5e, offset 0x1780
+	0x1780: 0x4000, 0x1781: 0x4000, 0x1782: 0x4000, 0x1783: 0x4000, 0x1784: 0x4000, 0x1785: 0x4000,
+	0x1786: 0x4000, 0x1787: 0x4000, 0x1788: 0x4000, 0x1789: 0x4000, 0x178a: 0x4000, 0x178b: 0x4000,
+	0x178c: 0x4000, 0x178d: 0x4000, 0x178e: 0x4000, 0x178f: 0x4000,
+	// Block 0x5f, offset 0x17c0
+	0x17c0: 0x4000, 0x17c1: 0x4000, 0x17c2: 0x4000, 0x17c3: 0x4000, 0x17c4: 0x4000, 0x17c5: 0x4000,
+	0x17cc: 0x4000, 0x17d0: 0x4000, 0x17d1: 0x4000,
+	0x17d2: 0x4000,
+	0x17eb: 0x4000, 0x17ec: 0x4000,
+	0x17f4: 0x4000, 0x17f5: 0x4000,
+	0x17f6: 0x4000, 0x17f7: 0x4000, 0x17f8: 0x4000, 0x17f9: 0x4000,
+	// Block 0x60, offset 0x1800
+	0x1810: 0x4000, 0x1811: 0x4000,
+	0x1812: 0x4000, 0x1813: 0x4000, 0x1814: 0x4000, 0x1815: 0x4000, 0x1816: 0x4000, 0x1817: 0x4000,
+	0x1818: 0x4000, 0x1819: 0x4000, 0x181a: 0x4000, 0x181b: 0x4000, 0x181c: 0x4000, 0x181d: 0x4000,
+	0x181e: 0x4000, 0x181f: 0x4000, 0x1820: 0x4000, 0x1821: 0x4000, 0x1822: 0x4000, 0x1823: 0x4000,
+	0x1824: 0x4000, 0x1825: 0x4000, 0x1826: 0x4000, 0x1827: 0x4000, 0x1828: 0x4000, 0x1829: 0x4000,
+	0x182a: 0x4000, 0x182b: 0x4000, 0x182c: 0x4000, 0x182d: 0x4000, 0x182e: 0x4000, 0x182f: 0x4000,
+	0x1830: 0x4000, 0x1831: 0x4000, 0x1832: 0x4000, 0x1833: 0x4000, 0x1834: 0x4000, 0x1835: 0x4000,
+	0x1836: 0x4000, 0x1837: 0x4000, 0x1838: 0x4000, 0x1839: 0x4000, 0x183a: 0x4000, 0x183b: 0x4000,
+	0x183c: 0x4000, 0x183d: 0x4000, 0x183e: 0x4000,
+	// Block 0x61, offset 0x1840
+	0x1840: 0x4000, 0x1841: 0x4000, 0x1842: 0x4000, 0x1843: 0x4000, 0x1844: 0x4000, 0x1845: 0x4000,
+	0x1846: 0x4000, 0x1847: 0x4000, 0x1848: 0x4000, 0x1849: 0x4000, 0x184a: 0x4000, 0x184b: 0x4000,
+	0x184c: 0x4000, 0x184d: 0x4000, 0x184e: 0x4000, 0x184f: 0x4000, 0x1850: 0x4000, 0x1851: 0x4000,
+	0x1852: 0x4000, 0x1853: 0x4000, 0x1854: 0x4000, 0x1855: 0x4000, 0x1856: 0x4000, 0x1857: 0x4000,
+	0x1858: 0x4000, 0x1859: 0x4000, 0x185a: 0x4000, 0x185b: 0x4000, 0x185c: 0x4000, 0x185d: 0x4000,
+	0x185e: 0x4000, 0x185f: 0x4000, 0x1860: 0x4000, 0x1861: 0x4000, 0x1862: 0x4000, 0x1863: 0x4000,
+	0x1864: 0x4000, 0x1865: 0x4000, 0x1866: 0x4000, 0x1867: 0x4000, 0x1868: 0x4000, 0x1869: 0x4000,
+	0x186a: 0x4000, 0x186b: 0x4000, 0x186c: 0x4000, 0x186d: 0x4000, 0x186e: 0x4000, 0x186f: 0x4000,
+	0x1870: 0x4000, 0x1873: 0x4000, 0x1874: 0x4000, 0x1875: 0x4000,
+	0x1876: 0x4000, 0x187a: 0x4000,
+	0x187c: 0x4000, 0x187d: 0x4000, 0x187e: 0x4000, 0x187f: 0x4000,
+	// Block 0x62, offset 0x1880
+	0x1880: 0x4000, 0x1881: 0x4000, 0x1882: 0x4000, 0x1883: 0x4000, 0x1884: 0x4000, 0x1885: 0x4000,
+	0x1886: 0x4000, 0x1887: 0x4000, 0x1888: 0x4000, 0x1889: 0x4000, 0x188a: 0x4000, 0x188b: 0x4000,
+	0x188c: 0x4000, 0x188d: 0x4000, 0x188e: 0x4000, 0x188f: 0x4000, 0x1890: 0x4000, 0x1891: 0x4000,
+	0x1892: 0x4000, 0x1893: 0x4000, 0x1894: 0x4000, 0x1895: 0x4000, 0x1896: 0x4000, 0x1897: 0x4000,
+	0x1898: 0x4000, 0x1899: 0x4000, 0x189a: 0x4000, 0x189b: 0x4000, 0x189c: 0x4000, 0x189d: 0x4000,
+	0x189e: 0x4000, 0x189f: 0x4000, 0x18a0: 0x4000, 0x18a1: 0x4000, 0x18a2: 0x4000,
+	0x18b0: 0x4000, 0x18b1: 0x4000, 0x18b2: 0x4000, 0x18b3: 0x4000, 0x18b4: 0x4000, 0x18b5: 0x4000,
+	0x18b6: 0x4000, 0x18b7: 0x4000, 0x18b8: 0x4000, 0x18b9: 0x4000,
+	// Block 0x63, offset 0x18c0
+	0x18c0: 0x4000, 0x18c1: 0x4000, 0x18c2: 0x4000,
+	0x18d0: 0x4000, 0x18d1: 0x4000,
+	0x18d2: 0x4000, 0x18d3: 0x4000, 0x18d4: 0x4000, 0x18d5: 0x4000, 0x18d6: 0x4000, 0x18d7: 0x4000,
+	0x18d8: 0x4000, 0x18d9: 0x4000, 0x18da: 0x4000, 0x18db: 0x4000, 0x18dc: 0x4000, 0x18dd: 0x4000,
+	0x18de: 0x4000, 0x18df: 0x4000, 0x18e0: 0x4000, 0x18e1: 0x4000, 0x18e2: 0x4000, 0x18e3: 0x4000,
+	0x18e4: 0x4000, 0x18e5: 0x4000, 0x18e6: 0x4000, 0x18e7: 0x4000, 0x18e8: 0x4000, 0x18e9: 0x4000,
+	0x18ea: 0x4000, 0x18eb: 0x4000, 0x18ec: 0x4000, 0x18ed: 0x4000, 0x18ee: 0x4000, 0x18ef: 0x4000,
+	0x18f0: 0x4000, 0x18f1: 0x4000, 0x18f2: 0x4000, 0x18f3: 0x4000, 0x18f4: 0x4000, 0x18f5: 0x4000,
+	0x18f6: 0x4000, 0x18f7: 0x4000, 0x18f8: 0x4000, 0x18f9: 0x4000, 0x18fa: 0x4000, 0x18fb: 0x4000,
+	0x18fc: 0x4000, 0x18fd: 0x4000, 0x18fe: 0x4000, 0x18ff: 0x4000,
+	// Block 0x64, offset 0x1900
+	0x1900: 0x2000, 0x1901: 0x2000, 0x1902: 0x2000, 0x1903: 0x2000, 0x1904: 0x2000, 0x1905: 0x2000,
+	0x1906: 0x2000, 0x1907: 0x2000, 0x1908: 0x2000, 0x1909: 0x2000, 0x190a: 0x2000, 0x190b: 0x2000,
+	0x190c: 0x2000, 0x190d: 0x2000, 0x190e: 0x2000, 0x190f: 0x2000, 0x1910: 0x2000, 0x1911: 0x2000,
+	0x1912: 0x2000, 0x1913: 0x2000, 0x1914: 0x2000, 0x1915: 0x2000, 0x1916: 0x2000, 0x1917: 0x2000,
+	0x1918: 0x2000, 0x1919: 0x2000, 0x191a: 0x2000, 0x191b: 0x2000, 0x191c: 0x2000, 0x191d: 0x2000,
+	0x191e: 0x2000, 0x191f: 0x2000, 0x1920: 0x2000, 0x1921: 0x2000, 0x1922: 0x2000, 0x1923: 0x2000,
+	0x1924: 0x2000, 0x1925: 0x2000, 0x1926: 0x2000, 0x1927: 0x2000, 0x1928: 0x2000, 0x1929: 0x2000,
+	0x192a: 0x2000, 0x192b: 0x2000, 0x192c: 0x2000, 0x192d: 0x2000, 0x192e: 0x2000, 0x192f: 0x2000,
+	0x1930: 0x2000, 0x1931: 0x2000, 0x1932: 0x2000, 0x1933: 0x2000, 0x1934: 0x2000, 0x1935: 0x2000,
+	0x1936: 0x2000, 0x1937: 0x2000, 0x1938: 0x2000, 0x1939: 0x2000, 0x193a: 0x2000, 0x193b: 0x2000,
+	0x193c: 0x2000, 0x193d: 0x2000,
+}
+
+// widthIndex: 22 blocks, 1408 entries, 1408 bytes
+// Block 0 is the zero block.
+var widthIndex = [1408]uint8{
+	// Block 0x0, offset 0x0
+	// Block 0x1, offset 0x40
+	// Block 0x2, offset 0x80
+	// Block 0x3, offset 0xc0
+	0xc2: 0x01, 0xc3: 0x02, 0xc4: 0x03, 0xc5: 0x04, 0xc7: 0x05,
+	0xc9: 0x06, 0xcb: 0x07, 0xcc: 0x08, 0xcd: 0x09, 0xce: 0x0a, 0xcf: 0x0b,
+	0xd0: 0x0c, 0xd1: 0x0d,
+	0xe1: 0x02, 0xe2: 0x03, 0xe3: 0x04, 0xe4: 0x05, 0xe5: 0x06, 0xe6: 0x06, 0xe7: 0x06,
+	0xe8: 0x06, 0xe9: 0x06, 0xea: 0x07, 0xeb: 0x06, 0xec: 0x06, 0xed: 0x08, 0xee: 0x09, 0xef: 0x0a,
+	0xf0: 0x0f, 0xf3: 0x12, 0xf4: 0x13,
+	// Block 0x4, offset 0x100
+	0x104: 0x0e, 0x105: 0x0f,
+	// Block 0x5, offset 0x140
+	0x140: 0x10, 0x141: 0x11, 0x142: 0x12, 0x144: 0x13, 0x145: 0x14, 0x146: 0x15, 0x147: 0x16,
+	0x148: 0x17, 0x149: 0x18, 0x14a: 0x19, 0x14c: 0x1a, 0x14f: 0x1b,
+	0x151: 0x1c, 0x152: 0x08, 0x153: 0x1d, 0x154: 0x1e, 0x155: 0x1f, 0x156: 0x20, 0x157: 0x21,
+	0x158: 0x22, 0x159: 0x23, 0x15a: 0x24, 0x15b: 0x25, 0x15c: 0x26, 0x15d: 0x27, 0x15e: 0x28, 0x15f: 0x29,
+	0x166: 0x2a,
+	0x16c: 0x2b, 0x16d: 0x2c,
+	0x17a: 0x2d, 0x17b: 0x2e, 0x17c: 0x0e, 0x17d: 0x0e, 0x17e: 0x0e, 0x17f: 0x2f,
+	// Block 0x6, offset 0x180
+	0x180: 0x30, 0x181: 0x31, 0x182: 0x32, 0x183: 0x33, 0x184: 0x34, 0x185: 0x35, 0x186: 0x36, 0x187: 0x37,
+	0x188: 0x38, 0x189: 0x39, 0x18a: 0x0e, 0x18b: 0x3a, 0x18c: 0x0e, 0x18d: 0x0e, 0x18e: 0x0e, 0x18f: 0x0e,
+	0x190: 0x0e, 0x191: 0x0e, 0x192: 0x0e, 0x193: 0x0e, 0x194: 0x0e, 0x195: 0x0e, 0x196: 0x0e, 0x197: 0x0e,
+	0x198: 0x0e, 0x199: 0x0e, 0x19a: 0x0e, 0x19b: 0x0e, 0x19c: 0x0e, 0x19d: 0x0e, 0x19e: 0x0e, 0x19f: 0x0e,
+	0x1a0: 0x0e, 0x1a1: 0x0e, 0x1a2: 0x0e, 0x1a3: 0x0e, 0x1a4: 0x0e, 0x1a5: 0x0e, 0x1a6: 0x0e, 0x1a7: 0x0e,
+	0x1a8: 0x0e, 0x1a9: 0x0e, 0x1aa: 0x0e, 0x1ab: 0x0e, 0x1ac: 0x0e, 0x1ad: 0x0e, 0x1ae: 0x0e, 0x1af: 0x0e,
+	0x1b0: 0x0e, 0x1b1: 0x0e, 0x1b2: 0x0e, 0x1b3: 0x0e, 0x1b4: 0x0e, 0x1b5: 0x0e, 0x1b6: 0x0e, 0x1b7: 0x0e,
+	0x1b8: 0x0e, 0x1b9: 0x0e, 0x1ba: 0x0e, 0x1bb: 0x0e, 0x1bc: 0x0e, 0x1bd: 0x0e, 0x1be: 0x0e, 0x1bf: 0x0e,
+	// Block 0x7, offset 0x1c0
+	0x1c0: 0x0e, 0x1c1: 0x0e, 0x1c2: 0x0e, 0x1c3: 0x0e, 0x1c4: 0x0e, 0x1c5: 0x0e, 0x1c6: 0x0e, 0x1c7: 0x0e,
+	0x1c8: 0x0e, 0x1c9: 0x0e, 0x1ca: 0x0e, 0x1cb: 0x0e, 0x1cc: 0x0e, 0x1cd: 0x0e, 0x1ce: 0x0e, 0x1cf: 0x0e,
+	0x1d0: 0x0e, 0x1d1: 0x0e, 0x1d2: 0x0e, 0x1d3: 0x0e, 0x1d4: 0x0e, 0x1d5: 0x0e, 0x1d6: 0x0e, 0x1d7: 0x0e,
+	0x1d8: 0x0e, 0x1d9: 0x0e, 0x1da: 0x0e, 0x1db: 0x0e, 0x1dc: 0x0e, 0x1dd: 0x0e, 0x1de: 0x0e, 0x1df: 0x0e,
+	0x1e0: 0x0e, 0x1e1: 0x0e, 0x1e2: 0x0e, 0x1e3: 0x0e, 0x1e4: 0x0e, 0x1e5: 0x0e, 0x1e6: 0x0e, 0x1e7: 0x0e,
+	0x1e8: 0x0e, 0x1e9: 0x0e, 0x1ea: 0x0e, 0x1eb: 0x0e, 0x1ec: 0x0e, 0x1ed: 0x0e, 0x1ee: 0x0e, 0x1ef: 0x0e,
+	0x1f0: 0x0e, 0x1f1: 0x0e, 0x1f2: 0x0e, 0x1f3: 0x0e, 0x1f4: 0x0e, 0x1f5: 0x0e, 0x1f6: 0x0e,
+	0x1f8: 0x0e, 0x1f9: 0x0e, 0x1fa: 0x0e, 0x1fb: 0x0e, 0x1fc: 0x0e, 0x1fd: 0x0e, 0x1fe: 0x0e, 0x1ff: 0x0e,
+	// Block 0x8, offset 0x200
+	0x200: 0x0e, 0x201: 0x0e, 0x202: 0x0e, 0x203: 0x0e, 0x204: 0x0e, 0x205: 0x0e, 0x206: 0x0e, 0x207: 0x0e,
+	0x208: 0x0e, 0x209: 0x0e, 0x20a: 0x0e, 0x20b: 0x0e, 0x20c: 0x0e, 0x20d: 0x0e, 0x20e: 0x0e, 0x20f: 0x0e,
+	0x210: 0x0e, 0x211: 0x0e, 0x212: 0x0e, 0x213: 0x0e, 0x214: 0x0e, 0x215: 0x0e, 0x216: 0x0e, 0x217: 0x0e,
+	0x218: 0x0e, 0x219: 0x0e, 0x21a: 0x0e, 0x21b: 0x0e, 0x21c: 0x0e, 0x21d: 0x0e, 0x21e: 0x0e, 0x21f: 0x0e,
+	0x220: 0x0e, 0x221: 0x0e, 0x222: 0x0e, 0x223: 0x0e, 0x224: 0x0e, 0x225: 0x0e, 0x226: 0x0e, 0x227: 0x0e,
+	0x228: 0x0e, 0x229: 0x0e, 0x22a: 0x0e, 0x22b: 0x0e, 0x22c: 0x0e, 0x22d: 0x0e, 0x22e: 0x0e, 0x22f: 0x0e,
+	0x230: 0x0e, 0x231: 0x0e, 0x232: 0x0e, 0x233: 0x0e, 0x234: 0x0e, 0x235: 0x0e, 0x236: 0x0e, 0x237: 0x0e,
+	0x238: 0x0e, 0x239: 0x0e, 0x23a: 0x0e, 0x23b: 0x0e, 0x23c: 0x0e, 0x23d: 0x0e, 0x23e: 0x0e, 0x23f: 0x0e,
+	// Block 0x9, offset 0x240
+	0x240: 0x0e, 0x241: 0x0e, 0x242: 0x0e, 0x243: 0x0e, 0x244: 0x0e, 0x245: 0x0e, 0x246: 0x0e, 0x247: 0x0e,
+	0x248: 0x0e, 0x249: 0x0e, 0x24a: 0x0e, 0x24b: 0x0e, 0x24c: 0x0e, 0x24d: 0x0e, 0x24e: 0x0e, 0x24f: 0x0e,
+	0x250: 0x0e, 0x251: 0x0e, 0x252: 0x3b, 0x253: 0x3c,
+	0x265: 0x3d,
+	0x270: 0x0e, 0x271: 0x0e, 0x272: 0x0e, 0x273: 0x0e, 0x274: 0x0e, 0x275: 0x0e, 0x276: 0x0e, 0x277: 0x0e,
+	0x278: 0x0e, 0x279: 0x0e, 0x27a: 0x0e, 0x27b: 0x0e, 0x27c: 0x0e, 0x27d: 0x0e, 0x27e: 0x0e, 0x27f: 0x0e,
+	// Block 0xa, offset 0x280
+	0x280: 0x0e, 0x281: 0x0e, 0x282: 0x0e, 0x283: 0x0e, 0x284: 0x0e, 0x285: 0x0e, 0x286: 0x0e, 0x287: 0x0e,
+	0x288: 0x0e, 0x289: 0x0e, 0x28a: 0x0e, 0x28b: 0x0e, 0x28c: 0x0e, 0x28d: 0x0e, 0x28e: 0x0e, 0x28f: 0x0e,
+	0x290: 0x0e, 0x291: 0x0e, 0x292: 0x0e, 0x293: 0x0e, 0x294: 0x0e, 0x295: 0x0e, 0x296: 0x0e, 0x297: 0x0e,
+	0x298: 0x0e, 0x299: 0x0e, 0x29a: 0x0e, 0x29b: 0x0e, 0x29c: 0x0e, 0x29d: 0x0e, 0x29e: 0x3e,
+	// Block 0xb, offset 0x2c0
+	0x2c0: 0x08, 0x2c1: 0x08, 0x2c2: 0x08, 0x2c3: 0x08, 0x2c4: 0x08, 0x2c5: 0x08, 0x2c6: 0x08, 0x2c7: 0x08,
+	0x2c8: 0x08, 0x2c9: 0x08, 0x2ca: 0x08, 0x2cb: 0x08, 0x2cc: 0x08, 0x2cd: 0x08, 0x2ce: 0x08, 0x2cf: 0x08,
+	0x2d0: 0x08, 0x2d1: 0x08, 0x2d2: 0x08, 0x2d3: 0x08, 0x2d4: 0x08, 0x2d5: 0x08, 0x2d6: 0x08, 0x2d7: 0x08,
+	0x2d8: 0x08, 0x2d9: 0x08, 0x2da: 0x08, 0x2db: 0x08, 0x2dc: 0x08, 0x2dd: 0x08, 0x2de: 0x08, 0x2df: 0x08,
+	0x2e0: 0x08, 0x2e1: 0x08, 0x2e2: 0x08, 0x2e3: 0x08, 0x2e4: 0x08, 0x2e5: 0x08, 0x2e6: 0x08, 0x2e7: 0x08,
+	0x2e8: 0x08, 0x2e9: 0x08, 0x2ea: 0x08, 0x2eb: 0x08, 0x2ec: 0x08, 0x2ed: 0x08, 0x2ee: 0x08, 0x2ef: 0x08,
+	0x2f0: 0x08, 0x2f1: 0x08, 0x2f2: 0x08, 0x2f3: 0x08, 0x2f4: 0x08, 0x2f5: 0x08, 0x2f6: 0x08, 0x2f7: 0x08,
+	0x2f8: 0x08, 0x2f9: 0x08, 0x2fa: 0x08, 0x2fb: 0x08, 0x2fc: 0x08, 0x2fd: 0x08, 0x2fe: 0x08, 0x2ff: 0x08,
+	// Block 0xc, offset 0x300
+	0x300: 0x08, 0x301: 0x08, 0x302: 0x08, 0x303: 0x08, 0x304: 0x08, 0x305: 0x08, 0x306: 0x08, 0x307: 0x08,
+	0x308: 0x08, 0x309: 0x08, 0x30a: 0x08, 0x30b: 0x08, 0x30c: 0x08, 0x30d: 0x08, 0x30e: 0x08, 0x30f: 0x08,
+	0x310: 0x08, 0x311: 0x08, 0x312: 0x08, 0x313: 0x08, 0x314: 0x08, 0x315: 0x08, 0x316: 0x08, 0x317: 0x08,
+	0x318: 0x08, 0x319: 0x08, 0x31a: 0x08, 0x31b: 0x08, 0x31c: 0x08, 0x31d: 0x08, 0x31e: 0x08, 0x31f: 0x08,
+	0x320: 0x08, 0x321: 0x08, 0x322: 0x08, 0x323: 0x08, 0x324: 0x0e, 0x325: 0x0e, 0x326: 0x0e, 0x327: 0x0e,
+	0x328: 0x0e, 0x329: 0x0e, 0x32a: 0x0e, 0x32b: 0x0e,
+	0x338: 0x3f, 0x339: 0x40, 0x33c: 0x41, 0x33d: 0x42, 0x33e: 0x43, 0x33f: 0x44,
+	// Block 0xd, offset 0x340
+	0x37f: 0x45,
+	// Block 0xe, offset 0x380
+	0x380: 0x0e, 0x381: 0x0e, 0x382: 0x0e, 0x383: 0x0e, 0x384: 0x0e, 0x385: 0x0e, 0x386: 0x0e, 0x387: 0x0e,
+	0x388: 0x0e, 0x389: 0x0e, 0x38a: 0x0e, 0x38b: 0x0e, 0x38c: 0x0e, 0x38d: 0x0e, 0x38e: 0x0e, 0x38f: 0x0e,
+	0x390: 0x0e, 0x391: 0x0e, 0x392: 0x0e, 0x393: 0x0e, 0x394: 0x0e, 0x395: 0x0e, 0x396: 0x0e, 0x397: 0x0e,
+	0x398: 0x0e, 0x399: 0x0e, 0x39a: 0x0e, 0x39b: 0x0e, 0x39c: 0x0e, 0x39d: 0x0e, 0x39e: 0x0e, 0x39f: 0x46,
+	0x3a0: 0x0e, 0x3a1: 0x0e, 0x3a2: 0x0e, 0x3a3: 0x0e, 0x3a4: 0x0e, 0x3a5: 0x0e, 0x3a6: 0x0e, 0x3a7: 0x0e,
+	0x3a8: 0x0e, 0x3a9: 0x0e, 0x3aa: 0x0e, 0x3ab: 0x47,
+	// Block 0xf, offset 0x3c0
+	0x3c0: 0x0e, 0x3c1: 0x0e, 0x3c2: 0x0e, 0x3c3: 0x0e, 0x3c4: 0x48, 0x3c5: 0x49, 0x3c6: 0x0e, 0x3c7: 0x0e,
+	0x3c8: 0x0e, 0x3c9: 0x0e, 0x3ca: 0x0e, 0x3cb: 0x4a,
+	// Block 0x10, offset 0x400
+	0x400: 0x4b, 0x403: 0x4c, 0x404: 0x4d, 0x405: 0x4e, 0x406: 0x4f,
+	0x408: 0x50, 0x409: 0x51, 0x40c: 0x52, 0x40d: 0x53, 0x40e: 0x54, 0x40f: 0x55,
+	0x410: 0x3a, 0x411: 0x56, 0x412: 0x0e, 0x413: 0x57, 0x414: 0x58, 0x415: 0x59, 0x416: 0x5a, 0x417: 0x5b,
+	0x418: 0x0e, 0x419: 0x5c, 0x41a: 0x0e, 0x41b: 0x5d,
+	0x424: 0x5e, 0x425: 0x5f, 0x426: 0x60, 0x427: 0x61,
+	// Block 0x11, offset 0x440
+	0x456: 0x0b, 0x457: 0x06,
+	0x458: 0x0c, 0x45b: 0x0d, 0x45f: 0x0e,
+	0x460: 0x06, 0x461: 0x06, 0x462: 0x06, 0x463: 0x06, 0x464: 0x06, 0x465: 0x06, 0x466: 0x06, 0x467: 0x06,
+	0x468: 0x06, 0x469: 0x06, 0x46a: 0x06, 0x46b: 0x06, 0x46c: 0x06, 0x46d: 0x06, 0x46e: 0x06, 0x46f: 0x06,
+	0x470: 0x06, 0x471: 0x06, 0x472: 0x06, 0x473: 0x06, 0x474: 0x06, 0x475: 0x06, 0x476: 0x06, 0x477: 0x06,
+	0x478: 0x06, 0x479: 0x06, 0x47a: 0x06, 0x47b: 0x06, 0x47c: 0x06, 0x47d: 0x06, 0x47e: 0x06, 0x47f: 0x06,
+	// Block 0x12, offset 0x480
+	0x484: 0x08, 0x485: 0x08, 0x486: 0x08, 0x487: 0x09,
+	// Block 0x13, offset 0x4c0
+	0x4c0: 0x08, 0x4c1: 0x08, 0x4c2: 0x08, 0x4c3: 0x08, 0x4c4: 0x08, 0x4c5: 0x08, 0x4c6: 0x08, 0x4c7: 0x08,
+	0x4c8: 0x08, 0x4c9: 0x08, 0x4ca: 0x08, 0x4cb: 0x08, 0x4cc: 0x08, 0x4cd: 0x08, 0x4ce: 0x08, 0x4cf: 0x08,
+	0x4d0: 0x08, 0x4d1: 0x08, 0x4d2: 0x08, 0x4d3: 0x08, 0x4d4: 0x08, 0x4d5: 0x08, 0x4d6: 0x08, 0x4d7: 0x08,
+	0x4d8: 0x08, 0x4d9: 0x08, 0x4da: 0x08, 0x4db: 0x08, 0x4dc: 0x08, 0x4dd: 0x08, 0x4de: 0x08, 0x4df: 0x08,
+	0x4e0: 0x08, 0x4e1: 0x08, 0x4e2: 0x08, 0x4e3: 0x08, 0x4e4: 0x08, 0x4e5: 0x08, 0x4e6: 0x08, 0x4e7: 0x08,
+	0x4e8: 0x08, 0x4e9: 0x08, 0x4ea: 0x08, 0x4eb: 0x08, 0x4ec: 0x08, 0x4ed: 0x08, 0x4ee: 0x08, 0x4ef: 0x08,
+	0x4f0: 0x08, 0x4f1: 0x08, 0x4f2: 0x08, 0x4f3: 0x08, 0x4f4: 0x08, 0x4f5: 0x08, 0x4f6: 0x08, 0x4f7: 0x08,
+	0x4f8: 0x08, 0x4f9: 0x08, 0x4fa: 0x08, 0x4fb: 0x08, 0x4fc: 0x08, 0x4fd: 0x08, 0x4fe: 0x08, 0x4ff: 0x62,
+	// Block 0x14, offset 0x500
+	0x520: 0x10,
+	0x530: 0x09, 0x531: 0x09, 0x532: 0x09, 0x533: 0x09, 0x534: 0x09, 0x535: 0x09, 0x536: 0x09, 0x537: 0x09,
+	0x538: 0x09, 0x539: 0x09, 0x53a: 0x09, 0x53b: 0x09, 0x53c: 0x09, 0x53d: 0x09, 0x53e: 0x09, 0x53f: 0x11,
+	// Block 0x15, offset 0x540
+	0x540: 0x09, 0x541: 0x09, 0x542: 0x09, 0x543: 0x09, 0x544: 0x09, 0x545: 0x09, 0x546: 0x09, 0x547: 0x09,
+	0x548: 0x09, 0x549: 0x09, 0x54a: 0x09, 0x54b: 0x09, 0x54c: 0x09, 0x54d: 0x09, 0x54e: 0x09, 0x54f: 0x11,
+}
+
+// inverseData contains 4-byte entries of the following format:
+//   <length> <modified UTF-8-encoded rune> <0 padding>
+// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the
+// UTF-8 encoding of the original rune. Mappings often have the following
+// pattern:
+//   A -> A  (U+FF21 -> U+0041)
+//   B -> B  (U+FF22 -> U+0042)
+//   ...
+// By xor-ing the last byte the same entry can be shared by many mappings. This
+// reduces the total number of distinct entries by about two thirds.
+// The resulting entry for the aforementioned mappings is
+//   { 0x01, 0xE0, 0x00, 0x00 }
+// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get
+//   E0 ^ A1 = 41.
+// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get
+//   E0 ^ A2 = 42.
+// Note that because of the xor-ing, the byte sequence stored in the entry is
+// not valid UTF-8.
+var inverseData = [150][4]byte{
+	{0x00, 0x00, 0x00, 0x00},
+	{0x03, 0xe3, 0x80, 0xa0},
+	{0x03, 0xef, 0xbc, 0xa0},
+	{0x03, 0xef, 0xbc, 0xe0},
+	{0x03, 0xef, 0xbd, 0xe0},
+	{0x03, 0xef, 0xbf, 0x02},
+	{0x03, 0xef, 0xbf, 0x00},
+	{0x03, 0xef, 0xbf, 0x0e},
+	{0x03, 0xef, 0xbf, 0x0c},
+	{0x03, 0xef, 0xbf, 0x0f},
+	{0x03, 0xef, 0xbf, 0x39},
+	{0x03, 0xef, 0xbf, 0x3b},
+	{0x03, 0xef, 0xbf, 0x3f},
+	{0x03, 0xef, 0xbf, 0x2a},
+	{0x03, 0xef, 0xbf, 0x0d},
+	{0x03, 0xef, 0xbf, 0x25},
+	{0x03, 0xef, 0xbd, 0x1a},
+	{0x03, 0xef, 0xbd, 0x26},
+	{0x01, 0xa0, 0x00, 0x00},
+	{0x03, 0xef, 0xbd, 0x25},
+	{0x03, 0xef, 0xbd, 0x23},
+	{0x03, 0xef, 0xbd, 0x2e},
+	{0x03, 0xef, 0xbe, 0x07},
+	{0x03, 0xef, 0xbe, 0x05},
+	{0x03, 0xef, 0xbd, 0x06},
+	{0x03, 0xef, 0xbd, 0x13},
+	{0x03, 0xef, 0xbd, 0x0b},
+	{0x03, 0xef, 0xbd, 0x16},
+	{0x03, 0xef, 0xbd, 0x0c},
+	{0x03, 0xef, 0xbd, 0x15},
+	{0x03, 0xef, 0xbd, 0x0d},
+	{0x03, 0xef, 0xbd, 0x1c},
+	{0x03, 0xef, 0xbd, 0x02},
+	{0x03, 0xef, 0xbd, 0x1f},
+	{0x03, 0xef, 0xbd, 0x1d},
+	{0x03, 0xef, 0xbd, 0x17},
+	{0x03, 0xef, 0xbd, 0x08},
+	{0x03, 0xef, 0xbd, 0x09},
+	{0x03, 0xef, 0xbd, 0x0e},
+	{0x03, 0xef, 0xbd, 0x04},
+	{0x03, 0xef, 0xbd, 0x05},
+	{0x03, 0xef, 0xbe, 0x3f},
+	{0x03, 0xef, 0xbe, 0x00},
+	{0x03, 0xef, 0xbd, 0x2c},
+	{0x03, 0xef, 0xbe, 0x06},
+	{0x03, 0xef, 0xbe, 0x0c},
+	{0x03, 0xef, 0xbe, 0x0f},
+	{0x03, 0xef, 0xbe, 0x0d},
+	{0x03, 0xef, 0xbe, 0x0b},
+	{0x03, 0xef, 0xbe, 0x19},
+	{0x03, 0xef, 0xbe, 0x15},
+	{0x03, 0xef, 0xbe, 0x11},
+	{0x03, 0xef, 0xbe, 0x31},
+	{0x03, 0xef, 0xbe, 0x33},
+	{0x03, 0xef, 0xbd, 0x0f},
+	{0x03, 0xef, 0xbe, 0x30},
+	{0x03, 0xef, 0xbe, 0x3e},
+	{0x03, 0xef, 0xbe, 0x32},
+	{0x03, 0xef, 0xbe, 0x36},
+	{0x03, 0xef, 0xbd, 0x14},
+	{0x03, 0xef, 0xbe, 0x2e},
+	{0x03, 0xef, 0xbd, 0x1e},
+	{0x03, 0xef, 0xbe, 0x10},
+	{0x03, 0xef, 0xbf, 0x13},
+	{0x03, 0xef, 0xbf, 0x15},
+	{0x03, 0xef, 0xbf, 0x17},
+	{0x03, 0xef, 0xbf, 0x1f},
+	{0x03, 0xef, 0xbf, 0x1d},
+	{0x03, 0xef, 0xbf, 0x1b},
+	{0x03, 0xef, 0xbf, 0x09},
+	{0x03, 0xef, 0xbf, 0x0b},
+	{0x03, 0xef, 0xbf, 0x37},
+	{0x03, 0xef, 0xbe, 0x04},
+	{0x01, 0xe0, 0x00, 0x00},
+	{0x03, 0xe2, 0xa6, 0x1a},
+	{0x03, 0xe2, 0xa6, 0x26},
+	{0x03, 0xe3, 0x80, 0x23},
+	{0x03, 0xe3, 0x80, 0x2e},
+	{0x03, 0xe3, 0x80, 0x25},
+	{0x03, 0xe3, 0x83, 0x1e},
+	{0x03, 0xe3, 0x83, 0x14},
+	{0x03, 0xe3, 0x82, 0x06},
+	{0x03, 0xe3, 0x82, 0x0b},
+	{0x03, 0xe3, 0x82, 0x0c},
+	{0x03, 0xe3, 0x82, 0x0d},
+	{0x03, 0xe3, 0x82, 0x02},
+	{0x03, 0xe3, 0x83, 0x0f},
+	{0x03, 0xe3, 0x83, 0x08},
+	{0x03, 0xe3, 0x83, 0x09},
+	{0x03, 0xe3, 0x83, 0x2c},
+	{0x03, 0xe3, 0x83, 0x0c},
+	{0x03, 0xe3, 0x82, 0x13},
+	{0x03, 0xe3, 0x82, 0x16},
+	{0x03, 0xe3, 0x82, 0x15},
+	{0x03, 0xe3, 0x82, 0x1c},
+	{0x03, 0xe3, 0x82, 0x1f},
+	{0x03, 0xe3, 0x82, 0x1d},
+	{0x03, 0xe3, 0x82, 0x1a},
+	{0x03, 0xe3, 0x82, 0x17},
+	{0x03, 0xe3, 0x82, 0x08},
+	{0x03, 0xe3, 0x82, 0x09},
+	{0x03, 0xe3, 0x82, 0x0e},
+	{0x03, 0xe3, 0x82, 0x04},
+	{0x03, 0xe3, 0x82, 0x05},
+	{0x03, 0xe3, 0x82, 0x3f},
+	{0x03, 0xe3, 0x83, 0x00},
+	{0x03, 0xe3, 0x83, 0x06},
+	{0x03, 0xe3, 0x83, 0x05},
+	{0x03, 0xe3, 0x83, 0x0d},
+	{0x03, 0xe3, 0x83, 0x0b},
+	{0x03, 0xe3, 0x83, 0x07},
+	{0x03, 0xe3, 0x83, 0x19},
+	{0x03, 0xe3, 0x83, 0x15},
+	{0x03, 0xe3, 0x83, 0x11},
+	{0x03, 0xe3, 0x83, 0x31},
+	{0x03, 0xe3, 0x83, 0x33},
+	{0x03, 0xe3, 0x83, 0x30},
+	{0x03, 0xe3, 0x83, 0x3e},
+	{0x03, 0xe3, 0x83, 0x32},
+	{0x03, 0xe3, 0x83, 0x36},
+	{0x03, 0xe3, 0x83, 0x2e},
+	{0x03, 0xe3, 0x82, 0x07},
+	{0x03, 0xe3, 0x85, 0x04},
+	{0x03, 0xe3, 0x84, 0x10},
+	{0x03, 0xe3, 0x85, 0x30},
+	{0x03, 0xe3, 0x85, 0x0d},
+	{0x03, 0xe3, 0x85, 0x13},
+	{0x03, 0xe3, 0x85, 0x15},
+	{0x03, 0xe3, 0x85, 0x17},
+	{0x03, 0xe3, 0x85, 0x1f},
+	{0x03, 0xe3, 0x85, 0x1d},
+	{0x03, 0xe3, 0x85, 0x1b},
+	{0x03, 0xe3, 0x85, 0x09},
+	{0x03, 0xe3, 0x85, 0x0f},
+	{0x03, 0xe3, 0x85, 0x0b},
+	{0x03, 0xe3, 0x85, 0x37},
+	{0x03, 0xe3, 0x85, 0x3b},
+	{0x03, 0xe3, 0x85, 0x39},
+	{0x03, 0xe3, 0x85, 0x3f},
+	{0x02, 0xc2, 0x02, 0x00},
+	{0x02, 0xc2, 0x0e, 0x00},
+	{0x02, 0xc2, 0x0c, 0x00},
+	{0x02, 0xc2, 0x00, 0x00},
+	{0x03, 0xe2, 0x82, 0x0f},
+	{0x03, 0xe2, 0x94, 0x2a},
+	{0x03, 0xe2, 0x86, 0x39},
+	{0x03, 0xe2, 0x86, 0x3b},
+	{0x03, 0xe2, 0x86, 0x3f},
+	{0x03, 0xe2, 0x96, 0x0d},
+	{0x03, 0xe2, 0x97, 0x25},
+}
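+// Illustrative sketch (editor's addition, not generated output): the xor
+// decoding described in the comment above, using the example entry for the
+// A -> A mapping (U+FF21 -> U+0041). How an entry is selected for a given
+// rune depends on the trie values and is not shown here.
+//
+//    entry := [4]byte{0x01, 0xE0, 0x00, 0x00} // <length> <modified UTF-8> <padding>
+//    last := byte(0xA1)                       // last UTF-8 byte of U+FF21 (EF BC A1)
+//    mapped := entry[1] ^ last                // 0xE0 ^ 0xA1 == 0x41 == 'A'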
+
+// Total table size 14936 bytes (14KiB)
diff --git a/vendor/golang.org/x/text/width/tables9.0.0.go b/vendor/golang.org/x/text/width/tables9.0.0.go
new file mode 100644
index 00000000..7069e263
--- /dev/null
+++ b/vendor/golang.org/x/text/width/tables9.0.0.go
@@ -0,0 +1,1286 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+// +build !go1.10
+
+package width
+
+// UnicodeVersion is the Unicode version from which the tables in this package are derived.
+const UnicodeVersion = "9.0.0"
+
+// lookup returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *widthTrie) lookup(s []byte) (v uint16, sz int) {
+	c0 := s[0]
+	switch {
+	case c0 < 0x80: // is ASCII
+		return widthValues[c0], 1
+	case c0 < 0xC2:
+		return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+	case c0 < 0xE0: // 2-byte UTF-8
+		if len(s) < 2 {
+			return 0, 0
+		}
+		i := widthIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c1), 2
+	case c0 < 0xF0: // 3-byte UTF-8
+		if len(s) < 3 {
+			return 0, 0
+		}
+		i := widthIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		o := uint32(i)<<6 + uint32(c1)
+		i = widthIndex[o]
+		c2 := s[2]
+		if c2 < 0x80 || 0xC0 <= c2 {
+			return 0, 2 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c2), 3
+	case c0 < 0xF8: // 4-byte UTF-8
+		if len(s) < 4 {
+			return 0, 0
+		}
+		i := widthIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		o := uint32(i)<<6 + uint32(c1)
+		i = widthIndex[o]
+		c2 := s[2]
+		if c2 < 0x80 || 0xC0 <= c2 {
+			return 0, 2 // Illegal UTF-8: not a continuation byte.
+		}
+		o = uint32(i)<<6 + uint32(c2)
+		i = widthIndex[o]
+		c3 := s[3]
+		if c3 < 0x80 || 0xC0 <= c3 {
+			return 0, 3 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c3), 4
+	}
+	// Illegal rune
+	return 0, 1
+}
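+// Usage sketch (editor's addition, not part of the generated code): callers
+// pass a byte slice that starts at a rune boundary; the names below are
+// hypothetical and only illustrate the documented return values.
+//
+//    var t widthTrie
+//    v, sz := t.lookup([]byte("\uFF21")) // U+FF21 encodes as EF BC A1
+//    // sz == 3; v is the trie value later classified by lookupValue.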
+
+// lookupUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *widthTrie) lookupUnsafe(s []byte) uint16 {
+	c0 := s[0]
+	if c0 < 0x80 { // is ASCII
+		return widthValues[c0]
+	}
+	i := widthIndex[c0]
+	if c0 < 0xE0 { // 2-byte UTF-8
+		return t.lookupValue(uint32(i), s[1])
+	}
+	i = widthIndex[uint32(i)<<6+uint32(s[1])]
+	if c0 < 0xF0 { // 3-byte UTF-8
+		return t.lookupValue(uint32(i), s[2])
+	}
+	i = widthIndex[uint32(i)<<6+uint32(s[2])]
+	if c0 < 0xF8 { // 4-byte UTF-8
+		return t.lookupValue(uint32(i), s[3])
+	}
+	return 0
+}
+
+// lookupString returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *widthTrie) lookupString(s string) (v uint16, sz int) {
+	c0 := s[0]
+	switch {
+	case c0 < 0x80: // is ASCII
+		return widthValues[c0], 1
+	case c0 < 0xC2:
+		return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+	case c0 < 0xE0: // 2-byte UTF-8
+		if len(s) < 2 {
+			return 0, 0
+		}
+		i := widthIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c1), 2
+	case c0 < 0xF0: // 3-byte UTF-8
+		if len(s) < 3 {
+			return 0, 0
+		}
+		i := widthIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		o := uint32(i)<<6 + uint32(c1)
+		i = widthIndex[o]
+		c2 := s[2]
+		if c2 < 0x80 || 0xC0 <= c2 {
+			return 0, 2 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c2), 3
+	case c0 < 0xF8: // 4-byte UTF-8
+		if len(s) < 4 {
+			return 0, 0
+		}
+		i := widthIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		o := uint32(i)<<6 + uint32(c1)
+		i = widthIndex[o]
+		c2 := s[2]
+		if c2 < 0x80 || 0xC0 <= c2 {
+			return 0, 2 // Illegal UTF-8: not a continuation byte.
+		}
+		o = uint32(i)<<6 + uint32(c2)
+		i = widthIndex[o]
+		c3 := s[3]
+		if c3 < 0x80 || 0xC0 <= c3 {
+			return 0, 3 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c3), 4
+	}
+	// Illegal rune
+	return 0, 1
+}
+
+// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *widthTrie) lookupStringUnsafe(s string) uint16 {
+	c0 := s[0]
+	if c0 < 0x80 { // is ASCII
+		return widthValues[c0]
+	}
+	i := widthIndex[c0]
+	if c0 < 0xE0 { // 2-byte UTF-8
+		return t.lookupValue(uint32(i), s[1])
+	}
+	i = widthIndex[uint32(i)<<6+uint32(s[1])]
+	if c0 < 0xF0 { // 3-byte UTF-8
+		return t.lookupValue(uint32(i), s[2])
+	}
+	i = widthIndex[uint32(i)<<6+uint32(s[2])]
+	if c0 < 0xF8 { // 4-byte UTF-8
+		return t.lookupValue(uint32(i), s[3])
+	}
+	return 0
+}
+
+// widthTrie. Total size: 14080 bytes (13.75 KiB). Checksum: 3b8aeb3dc03667a3.
+type widthTrie struct{}
+
+func newWidthTrie(i int) *widthTrie {
+	return &widthTrie{}
+}
+
+// lookupValue determines the type of block n and looks up the value for b.
+func (t *widthTrie) lookupValue(n uint32, b byte) uint16 {
+	switch {
+	default:
+		return uint16(widthValues[n<<6+uint32(b)])
+	}
+}
+
+// widthValues: 99 blocks, 6336 entries, 12672 bytes
+// The third block is the zero block.
+var widthValues = [6336]uint16{
+	// Block 0x0, offset 0x0
+	0x20: 0x6001, 0x21: 0x6002, 0x22: 0x6002, 0x23: 0x6002,
+	0x24: 0x6002, 0x25: 0x6002, 0x26: 0x6002, 0x27: 0x6002, 0x28: 0x6002, 0x29: 0x6002,
+	0x2a: 0x6002, 0x2b: 0x6002, 0x2c: 0x6002, 0x2d: 0x6002, 0x2e: 0x6002, 0x2f: 0x6002,
+	0x30: 0x6002, 0x31: 0x6002, 0x32: 0x6002, 0x33: 0x6002, 0x34: 0x6002, 0x35: 0x6002,
+	0x36: 0x6002, 0x37: 0x6002, 0x38: 0x6002, 0x39: 0x6002, 0x3a: 0x6002, 0x3b: 0x6002,
+	0x3c: 0x6002, 0x3d: 0x6002, 0x3e: 0x6002, 0x3f: 0x6002,
+	// Block 0x1, offset 0x40
+	0x40: 0x6003, 0x41: 0x6003, 0x42: 0x6003, 0x43: 0x6003, 0x44: 0x6003, 0x45: 0x6003,
+	0x46: 0x6003, 0x47: 0x6003, 0x48: 0x6003, 0x49: 0x6003, 0x4a: 0x6003, 0x4b: 0x6003,
+	0x4c: 0x6003, 0x4d: 0x6003, 0x4e: 0x6003, 0x4f: 0x6003, 0x50: 0x6003, 0x51: 0x6003,
+	0x52: 0x6003, 0x53: 0x6003, 0x54: 0x6003, 0x55: 0x6003, 0x56: 0x6003, 0x57: 0x6003,
+	0x58: 0x6003, 0x59: 0x6003, 0x5a: 0x6003, 0x5b: 0x6003, 0x5c: 0x6003, 0x5d: 0x6003,
+	0x5e: 0x6003, 0x5f: 0x6003, 0x60: 0x6004, 0x61: 0x6004, 0x62: 0x6004, 0x63: 0x6004,
+	0x64: 0x6004, 0x65: 0x6004, 0x66: 0x6004, 0x67: 0x6004, 0x68: 0x6004, 0x69: 0x6004,
+	0x6a: 0x6004, 0x6b: 0x6004, 0x6c: 0x6004, 0x6d: 0x6004, 0x6e: 0x6004, 0x6f: 0x6004,
+	0x70: 0x6004, 0x71: 0x6004, 0x72: 0x6004, 0x73: 0x6004, 0x74: 0x6004, 0x75: 0x6004,
+	0x76: 0x6004, 0x77: 0x6004, 0x78: 0x6004, 0x79: 0x6004, 0x7a: 0x6004, 0x7b: 0x6004,
+	0x7c: 0x6004, 0x7d: 0x6004, 0x7e: 0x6004,
+	// Block 0x2, offset 0x80
+	// Block 0x3, offset 0xc0
+	0xe1: 0x2000, 0xe2: 0x6005, 0xe3: 0x6005,
+	0xe4: 0x2000, 0xe5: 0x6006, 0xe6: 0x6005, 0xe7: 0x2000, 0xe8: 0x2000,
+	0xea: 0x2000, 0xec: 0x6007, 0xed: 0x2000, 0xee: 0x2000, 0xef: 0x6008,
+	0xf0: 0x2000, 0xf1: 0x2000, 0xf2: 0x2000, 0xf3: 0x2000, 0xf4: 0x2000,
+	0xf6: 0x2000, 0xf7: 0x2000, 0xf8: 0x2000, 0xf9: 0x2000, 0xfa: 0x2000,
+	0xfc: 0x2000, 0xfd: 0x2000, 0xfe: 0x2000, 0xff: 0x2000,
+	// Block 0x4, offset 0x100
+	0x106: 0x2000,
+	0x110: 0x2000,
+	0x117: 0x2000,
+	0x118: 0x2000,
+	0x11e: 0x2000, 0x11f: 0x2000, 0x120: 0x2000, 0x121: 0x2000,
+	0x126: 0x2000, 0x128: 0x2000, 0x129: 0x2000,
+	0x12a: 0x2000, 0x12c: 0x2000, 0x12d: 0x2000,
+	0x130: 0x2000, 0x132: 0x2000, 0x133: 0x2000,
+	0x137: 0x2000, 0x138: 0x2000, 0x139: 0x2000, 0x13a: 0x2000,
+	0x13c: 0x2000, 0x13e: 0x2000,
+	// Block 0x5, offset 0x140
+	0x141: 0x2000,
+	0x151: 0x2000,
+	0x153: 0x2000,
+	0x15b: 0x2000,
+	0x166: 0x2000, 0x167: 0x2000,
+	0x16b: 0x2000,
+	0x171: 0x2000, 0x172: 0x2000, 0x173: 0x2000,
+	0x178: 0x2000,
+	0x17f: 0x2000,
+	// Block 0x6, offset 0x180
+	0x180: 0x2000, 0x181: 0x2000, 0x182: 0x2000, 0x184: 0x2000,
+	0x188: 0x2000, 0x189: 0x2000, 0x18a: 0x2000, 0x18b: 0x2000,
+	0x18d: 0x2000,
+	0x192: 0x2000, 0x193: 0x2000,
+	0x1a6: 0x2000, 0x1a7: 0x2000,
+	0x1ab: 0x2000,
+	// Block 0x7, offset 0x1c0
+	0x1ce: 0x2000, 0x1d0: 0x2000,
+	0x1d2: 0x2000, 0x1d4: 0x2000, 0x1d6: 0x2000,
+	0x1d8: 0x2000, 0x1da: 0x2000, 0x1dc: 0x2000,
+	// Block 0x8, offset 0x200
+	0x211: 0x2000,
+	0x221: 0x2000,
+	// Block 0x9, offset 0x240
+	0x244: 0x2000,
+	0x247: 0x2000, 0x249: 0x2000, 0x24a: 0x2000, 0x24b: 0x2000,
+	0x24d: 0x2000, 0x250: 0x2000,
+	0x258: 0x2000, 0x259: 0x2000, 0x25a: 0x2000, 0x25b: 0x2000, 0x25d: 0x2000,
+	0x25f: 0x2000,
+	// Block 0xa, offset 0x280
+	0x280: 0x2000, 0x281: 0x2000, 0x282: 0x2000, 0x283: 0x2000, 0x284: 0x2000, 0x285: 0x2000,
+	0x286: 0x2000, 0x287: 0x2000, 0x288: 0x2000, 0x289: 0x2000, 0x28a: 0x2000, 0x28b: 0x2000,
+	0x28c: 0x2000, 0x28d: 0x2000, 0x28e: 0x2000, 0x28f: 0x2000, 0x290: 0x2000, 0x291: 0x2000,
+	0x292: 0x2000, 0x293: 0x2000, 0x294: 0x2000, 0x295: 0x2000, 0x296: 0x2000, 0x297: 0x2000,
+	0x298: 0x2000, 0x299: 0x2000, 0x29a: 0x2000, 0x29b: 0x2000, 0x29c: 0x2000, 0x29d: 0x2000,
+	0x29e: 0x2000, 0x29f: 0x2000, 0x2a0: 0x2000, 0x2a1: 0x2000, 0x2a2: 0x2000, 0x2a3: 0x2000,
+	0x2a4: 0x2000, 0x2a5: 0x2000, 0x2a6: 0x2000, 0x2a7: 0x2000, 0x2a8: 0x2000, 0x2a9: 0x2000,
+	0x2aa: 0x2000, 0x2ab: 0x2000, 0x2ac: 0x2000, 0x2ad: 0x2000, 0x2ae: 0x2000, 0x2af: 0x2000,
+	0x2b0: 0x2000, 0x2b1: 0x2000, 0x2b2: 0x2000, 0x2b3: 0x2000, 0x2b4: 0x2000, 0x2b5: 0x2000,
+	0x2b6: 0x2000, 0x2b7: 0x2000, 0x2b8: 0x2000, 0x2b9: 0x2000, 0x2ba: 0x2000, 0x2bb: 0x2000,
+	0x2bc: 0x2000, 0x2bd: 0x2000, 0x2be: 0x2000, 0x2bf: 0x2000,
+	// Block 0xb, offset 0x2c0
+	0x2c0: 0x2000, 0x2c1: 0x2000, 0x2c2: 0x2000, 0x2c3: 0x2000, 0x2c4: 0x2000, 0x2c5: 0x2000,
+	0x2c6: 0x2000, 0x2c7: 0x2000, 0x2c8: 0x2000, 0x2c9: 0x2000, 0x2ca: 0x2000, 0x2cb: 0x2000,
+	0x2cc: 0x2000, 0x2cd: 0x2000, 0x2ce: 0x2000, 0x2cf: 0x2000, 0x2d0: 0x2000, 0x2d1: 0x2000,
+	0x2d2: 0x2000, 0x2d3: 0x2000, 0x2d4: 0x2000, 0x2d5: 0x2000, 0x2d6: 0x2000, 0x2d7: 0x2000,
+	0x2d8: 0x2000, 0x2d9: 0x2000, 0x2da: 0x2000, 0x2db: 0x2000, 0x2dc: 0x2000, 0x2dd: 0x2000,
+	0x2de: 0x2000, 0x2df: 0x2000, 0x2e0: 0x2000, 0x2e1: 0x2000, 0x2e2: 0x2000, 0x2e3: 0x2000,
+	0x2e4: 0x2000, 0x2e5: 0x2000, 0x2e6: 0x2000, 0x2e7: 0x2000, 0x2e8: 0x2000, 0x2e9: 0x2000,
+	0x2ea: 0x2000, 0x2eb: 0x2000, 0x2ec: 0x2000, 0x2ed: 0x2000, 0x2ee: 0x2000, 0x2ef: 0x2000,
+	// Block 0xc, offset 0x300
+	0x311: 0x2000,
+	0x312: 0x2000, 0x313: 0x2000, 0x314: 0x2000, 0x315: 0x2000, 0x316: 0x2000, 0x317: 0x2000,
+	0x318: 0x2000, 0x319: 0x2000, 0x31a: 0x2000, 0x31b: 0x2000, 0x31c: 0x2000, 0x31d: 0x2000,
+	0x31e: 0x2000, 0x31f: 0x2000, 0x320: 0x2000, 0x321: 0x2000, 0x323: 0x2000,
+	0x324: 0x2000, 0x325: 0x2000, 0x326: 0x2000, 0x327: 0x2000, 0x328: 0x2000, 0x329: 0x2000,
+	0x331: 0x2000, 0x332: 0x2000, 0x333: 0x2000, 0x334: 0x2000, 0x335: 0x2000,
+	0x336: 0x2000, 0x337: 0x2000, 0x338: 0x2000, 0x339: 0x2000, 0x33a: 0x2000, 0x33b: 0x2000,
+	0x33c: 0x2000, 0x33d: 0x2000, 0x33e: 0x2000, 0x33f: 0x2000,
+	// Block 0xd, offset 0x340
+	0x340: 0x2000, 0x341: 0x2000, 0x343: 0x2000, 0x344: 0x2000, 0x345: 0x2000,
+	0x346: 0x2000, 0x347: 0x2000, 0x348: 0x2000, 0x349: 0x2000,
+	// Block 0xe, offset 0x380
+	0x381: 0x2000,
+	0x390: 0x2000, 0x391: 0x2000,
+	0x392: 0x2000, 0x393: 0x2000, 0x394: 0x2000, 0x395: 0x2000, 0x396: 0x2000, 0x397: 0x2000,
+	0x398: 0x2000, 0x399: 0x2000, 0x39a: 0x2000, 0x39b: 0x2000, 0x39c: 0x2000, 0x39d: 0x2000,
+	0x39e: 0x2000, 0x39f: 0x2000, 0x3a0: 0x2000, 0x3a1: 0x2000, 0x3a2: 0x2000, 0x3a3: 0x2000,
+	0x3a4: 0x2000, 0x3a5: 0x2000, 0x3a6: 0x2000, 0x3a7: 0x2000, 0x3a8: 0x2000, 0x3a9: 0x2000,
+	0x3aa: 0x2000, 0x3ab: 0x2000, 0x3ac: 0x2000, 0x3ad: 0x2000, 0x3ae: 0x2000, 0x3af: 0x2000,
+	0x3b0: 0x2000, 0x3b1: 0x2000, 0x3b2: 0x2000, 0x3b3: 0x2000, 0x3b4: 0x2000, 0x3b5: 0x2000,
+	0x3b6: 0x2000, 0x3b7: 0x2000, 0x3b8: 0x2000, 0x3b9: 0x2000, 0x3ba: 0x2000, 0x3bb: 0x2000,
+	0x3bc: 0x2000, 0x3bd: 0x2000, 0x3be: 0x2000, 0x3bf: 0x2000,
+	// Block 0xf, offset 0x3c0
+	0x3c0: 0x2000, 0x3c1: 0x2000, 0x3c2: 0x2000, 0x3c3: 0x2000, 0x3c4: 0x2000, 0x3c5: 0x2000,
+	0x3c6: 0x2000, 0x3c7: 0x2000, 0x3c8: 0x2000, 0x3c9: 0x2000, 0x3ca: 0x2000, 0x3cb: 0x2000,
+	0x3cc: 0x2000, 0x3cd: 0x2000, 0x3ce: 0x2000, 0x3cf: 0x2000, 0x3d1: 0x2000,
+	// Block 0x10, offset 0x400
+	0x400: 0x4000, 0x401: 0x4000, 0x402: 0x4000, 0x403: 0x4000, 0x404: 0x4000, 0x405: 0x4000,
+	0x406: 0x4000, 0x407: 0x4000, 0x408: 0x4000, 0x409: 0x4000, 0x40a: 0x4000, 0x40b: 0x4000,
+	0x40c: 0x4000, 0x40d: 0x4000, 0x40e: 0x4000, 0x40f: 0x4000, 0x410: 0x4000, 0x411: 0x4000,
+	0x412: 0x4000, 0x413: 0x4000, 0x414: 0x4000, 0x415: 0x4000, 0x416: 0x4000, 0x417: 0x4000,
+	0x418: 0x4000, 0x419: 0x4000, 0x41a: 0x4000, 0x41b: 0x4000, 0x41c: 0x4000, 0x41d: 0x4000,
+	0x41e: 0x4000, 0x41f: 0x4000, 0x420: 0x4000, 0x421: 0x4000, 0x422: 0x4000, 0x423: 0x4000,
+	0x424: 0x4000, 0x425: 0x4000, 0x426: 0x4000, 0x427: 0x4000, 0x428: 0x4000, 0x429: 0x4000,
+	0x42a: 0x4000, 0x42b: 0x4000, 0x42c: 0x4000, 0x42d: 0x4000, 0x42e: 0x4000, 0x42f: 0x4000,
+	0x430: 0x4000, 0x431: 0x4000, 0x432: 0x4000, 0x433: 0x4000, 0x434: 0x4000, 0x435: 0x4000,
+	0x436: 0x4000, 0x437: 0x4000, 0x438: 0x4000, 0x439: 0x4000, 0x43a: 0x4000, 0x43b: 0x4000,
+	0x43c: 0x4000, 0x43d: 0x4000, 0x43e: 0x4000, 0x43f: 0x4000,
+	// Block 0x11, offset 0x440
+	0x440: 0x4000, 0x441: 0x4000, 0x442: 0x4000, 0x443: 0x4000, 0x444: 0x4000, 0x445: 0x4000,
+	0x446: 0x4000, 0x447: 0x4000, 0x448: 0x4000, 0x449: 0x4000, 0x44a: 0x4000, 0x44b: 0x4000,
+	0x44c: 0x4000, 0x44d: 0x4000, 0x44e: 0x4000, 0x44f: 0x4000, 0x450: 0x4000, 0x451: 0x4000,
+	0x452: 0x4000, 0x453: 0x4000, 0x454: 0x4000, 0x455: 0x4000, 0x456: 0x4000, 0x457: 0x4000,
+	0x458: 0x4000, 0x459: 0x4000, 0x45a: 0x4000, 0x45b: 0x4000, 0x45c: 0x4000, 0x45d: 0x4000,
+	0x45e: 0x4000, 0x45f: 0x4000,
+	// Block 0x12, offset 0x480
+	0x490: 0x2000,
+	0x493: 0x2000, 0x494: 0x2000, 0x495: 0x2000, 0x496: 0x2000,
+	0x498: 0x2000, 0x499: 0x2000, 0x49c: 0x2000, 0x49d: 0x2000,
+	0x4a0: 0x2000, 0x4a1: 0x2000, 0x4a2: 0x2000,
+	0x4a4: 0x2000, 0x4a5: 0x2000, 0x4a6: 0x2000, 0x4a7: 0x2000,
+	0x4b0: 0x2000, 0x4b2: 0x2000, 0x4b3: 0x2000, 0x4b5: 0x2000,
+	0x4bb: 0x2000,
+	0x4be: 0x2000,
+	// Block 0x13, offset 0x4c0
+	0x4f4: 0x2000,
+	0x4ff: 0x2000,
+	// Block 0x14, offset 0x500
+	0x501: 0x2000, 0x502: 0x2000, 0x503: 0x2000, 0x504: 0x2000,
+	0x529: 0xa009,
+	0x52c: 0x2000,
+	// Block 0x15, offset 0x540
+	0x543: 0x2000, 0x545: 0x2000,
+	0x549: 0x2000,
+	0x553: 0x2000, 0x556: 0x2000,
+	0x561: 0x2000, 0x562: 0x2000,
+	0x566: 0x2000,
+	0x56b: 0x2000,
+	// Block 0x16, offset 0x580
+	0x593: 0x2000, 0x594: 0x2000,
+	0x59b: 0x2000, 0x59c: 0x2000, 0x59d: 0x2000,
+	0x59e: 0x2000, 0x5a0: 0x2000, 0x5a1: 0x2000, 0x5a2: 0x2000, 0x5a3: 0x2000,
+	0x5a4: 0x2000, 0x5a5: 0x2000, 0x5a6: 0x2000, 0x5a7: 0x2000, 0x5a8: 0x2000, 0x5a9: 0x2000,
+	0x5aa: 0x2000, 0x5ab: 0x2000,
+	0x5b0: 0x2000, 0x5b1: 0x2000, 0x5b2: 0x2000, 0x5b3: 0x2000, 0x5b4: 0x2000, 0x5b5: 0x2000,
+	0x5b6: 0x2000, 0x5b7: 0x2000, 0x5b8: 0x2000, 0x5b9: 0x2000,
+	// Block 0x17, offset 0x5c0
+	0x5c9: 0x2000,
+	0x5d0: 0x200a, 0x5d1: 0x200b,
+	0x5d2: 0x200a, 0x5d3: 0x200c, 0x5d4: 0x2000, 0x5d5: 0x2000, 0x5d6: 0x2000, 0x5d7: 0x2000,
+	0x5d8: 0x2000, 0x5d9: 0x2000,
+	0x5f8: 0x2000, 0x5f9: 0x2000,
+	// Block 0x18, offset 0x600
+	0x612: 0x2000, 0x614: 0x2000,
+	0x627: 0x2000,
+	// Block 0x19, offset 0x640
+	0x640: 0x2000, 0x642: 0x2000, 0x643: 0x2000,
+	0x647: 0x2000, 0x648: 0x2000, 0x64b: 0x2000,
+	0x64f: 0x2000, 0x651: 0x2000,
+	0x655: 0x2000,
+	0x65a: 0x2000, 0x65d: 0x2000,
+	0x65e: 0x2000, 0x65f: 0x2000, 0x660: 0x2000, 0x663: 0x2000,
+	0x665: 0x2000, 0x667: 0x2000, 0x668: 0x2000, 0x669: 0x2000,
+	0x66a: 0x2000, 0x66b: 0x2000, 0x66c: 0x2000, 0x66e: 0x2000,
+	0x674: 0x2000, 0x675: 0x2000,
+	0x676: 0x2000, 0x677: 0x2000,
+	0x67c: 0x2000, 0x67d: 0x2000,
+	// Block 0x1a, offset 0x680
+	0x688: 0x2000,
+	0x68c: 0x2000,
+	0x692: 0x2000,
+	0x6a0: 0x2000, 0x6a1: 0x2000,
+	0x6a4: 0x2000, 0x6a5: 0x2000, 0x6a6: 0x2000, 0x6a7: 0x2000,
+	0x6aa: 0x2000, 0x6ab: 0x2000, 0x6ae: 0x2000, 0x6af: 0x2000,
+	// Block 0x1b, offset 0x6c0
+	0x6c2: 0x2000, 0x6c3: 0x2000,
+	0x6c6: 0x2000, 0x6c7: 0x2000,
+	0x6d5: 0x2000,
+	0x6d9: 0x2000,
+	0x6e5: 0x2000,
+	0x6ff: 0x2000,
+	// Block 0x1c, offset 0x700
+	0x712: 0x2000,
+	0x71a: 0x4000, 0x71b: 0x4000,
+	0x729: 0x4000,
+	0x72a: 0x4000,
+	// Block 0x1d, offset 0x740
+	0x769: 0x4000,
+	0x76a: 0x4000, 0x76b: 0x4000, 0x76c: 0x4000,
+	0x770: 0x4000, 0x773: 0x4000,
+	// Block 0x1e, offset 0x780
+	0x7a0: 0x2000, 0x7a1: 0x2000, 0x7a2: 0x2000, 0x7a3: 0x2000,
+	0x7a4: 0x2000, 0x7a5: 0x2000, 0x7a6: 0x2000, 0x7a7: 0x2000, 0x7a8: 0x2000, 0x7a9: 0x2000,
+	0x7aa: 0x2000, 0x7ab: 0x2000, 0x7ac: 0x2000, 0x7ad: 0x2000, 0x7ae: 0x2000, 0x7af: 0x2000,
+	0x7b0: 0x2000, 0x7b1: 0x2000, 0x7b2: 0x2000, 0x7b3: 0x2000, 0x7b4: 0x2000, 0x7b5: 0x2000,
+	0x7b6: 0x2000, 0x7b7: 0x2000, 0x7b8: 0x2000, 0x7b9: 0x2000, 0x7ba: 0x2000, 0x7bb: 0x2000,
+	0x7bc: 0x2000, 0x7bd: 0x2000, 0x7be: 0x2000, 0x7bf: 0x2000,
+	// Block 0x1f, offset 0x7c0
+	0x7c0: 0x2000, 0x7c1: 0x2000, 0x7c2: 0x2000, 0x7c3: 0x2000, 0x7c4: 0x2000, 0x7c5: 0x2000,
+	0x7c6: 0x2000, 0x7c7: 0x2000, 0x7c8: 0x2000, 0x7c9: 0x2000, 0x7ca: 0x2000, 0x7cb: 0x2000,
+	0x7cc: 0x2000, 0x7cd: 0x2000, 0x7ce: 0x2000, 0x7cf: 0x2000, 0x7d0: 0x2000, 0x7d1: 0x2000,
+	0x7d2: 0x2000, 0x7d3: 0x2000, 0x7d4: 0x2000, 0x7d5: 0x2000, 0x7d6: 0x2000, 0x7d7: 0x2000,
+	0x7d8: 0x2000, 0x7d9: 0x2000, 0x7da: 0x2000, 0x7db: 0x2000, 0x7dc: 0x2000, 0x7dd: 0x2000,
+	0x7de: 0x2000, 0x7df: 0x2000, 0x7e0: 0x2000, 0x7e1: 0x2000, 0x7e2: 0x2000, 0x7e3: 0x2000,
+	0x7e4: 0x2000, 0x7e5: 0x2000, 0x7e6: 0x2000, 0x7e7: 0x2000, 0x7e8: 0x2000, 0x7e9: 0x2000,
+	0x7eb: 0x2000, 0x7ec: 0x2000, 0x7ed: 0x2000, 0x7ee: 0x2000, 0x7ef: 0x2000,
+	0x7f0: 0x2000, 0x7f1: 0x2000, 0x7f2: 0x2000, 0x7f3: 0x2000, 0x7f4: 0x2000, 0x7f5: 0x2000,
+	0x7f6: 0x2000, 0x7f7: 0x2000, 0x7f8: 0x2000, 0x7f9: 0x2000, 0x7fa: 0x2000, 0x7fb: 0x2000,
+	0x7fc: 0x2000, 0x7fd: 0x2000, 0x7fe: 0x2000, 0x7ff: 0x2000,
+	// Block 0x20, offset 0x800
+	0x800: 0x2000, 0x801: 0x2000, 0x802: 0x200d, 0x803: 0x2000, 0x804: 0x2000, 0x805: 0x2000,
+	0x806: 0x2000, 0x807: 0x2000, 0x808: 0x2000, 0x809: 0x2000, 0x80a: 0x2000, 0x80b: 0x2000,
+	0x80c: 0x2000, 0x80d: 0x2000, 0x80e: 0x2000, 0x80f: 0x2000, 0x810: 0x2000, 0x811: 0x2000,
+	0x812: 0x2000, 0x813: 0x2000, 0x814: 0x2000, 0x815: 0x2000, 0x816: 0x2000, 0x817: 0x2000,
+	0x818: 0x2000, 0x819: 0x2000, 0x81a: 0x2000, 0x81b: 0x2000, 0x81c: 0x2000, 0x81d: 0x2000,
+	0x81e: 0x2000, 0x81f: 0x2000, 0x820: 0x2000, 0x821: 0x2000, 0x822: 0x2000, 0x823: 0x2000,
+	0x824: 0x2000, 0x825: 0x2000, 0x826: 0x2000, 0x827: 0x2000, 0x828: 0x2000, 0x829: 0x2000,
+	0x82a: 0x2000, 0x82b: 0x2000, 0x82c: 0x2000, 0x82d: 0x2000, 0x82e: 0x2000, 0x82f: 0x2000,
+	0x830: 0x2000, 0x831: 0x2000, 0x832: 0x2000, 0x833: 0x2000, 0x834: 0x2000, 0x835: 0x2000,
+	0x836: 0x2000, 0x837: 0x2000, 0x838: 0x2000, 0x839: 0x2000, 0x83a: 0x2000, 0x83b: 0x2000,
+	0x83c: 0x2000, 0x83d: 0x2000, 0x83e: 0x2000, 0x83f: 0x2000,
+	// Block 0x21, offset 0x840
+	0x840: 0x2000, 0x841: 0x2000, 0x842: 0x2000, 0x843: 0x2000, 0x844: 0x2000, 0x845: 0x2000,
+	0x846: 0x2000, 0x847: 0x2000, 0x848: 0x2000, 0x849: 0x2000, 0x84a: 0x2000, 0x84b: 0x2000,
+	0x850: 0x2000, 0x851: 0x2000,
+	0x852: 0x2000, 0x853: 0x2000, 0x854: 0x2000, 0x855: 0x2000, 0x856: 0x2000, 0x857: 0x2000,
+	0x858: 0x2000, 0x859: 0x2000, 0x85a: 0x2000, 0x85b: 0x2000, 0x85c: 0x2000, 0x85d: 0x2000,
+	0x85e: 0x2000, 0x85f: 0x2000, 0x860: 0x2000, 0x861: 0x2000, 0x862: 0x2000, 0x863: 0x2000,
+	0x864: 0x2000, 0x865: 0x2000, 0x866: 0x2000, 0x867: 0x2000, 0x868: 0x2000, 0x869: 0x2000,
+	0x86a: 0x2000, 0x86b: 0x2000, 0x86c: 0x2000, 0x86d: 0x2000, 0x86e: 0x2000, 0x86f: 0x2000,
+	0x870: 0x2000, 0x871: 0x2000, 0x872: 0x2000, 0x873: 0x2000,
+	// Block 0x22, offset 0x880
+	0x880: 0x2000, 0x881: 0x2000, 0x882: 0x2000, 0x883: 0x2000, 0x884: 0x2000, 0x885: 0x2000,
+	0x886: 0x2000, 0x887: 0x2000, 0x888: 0x2000, 0x889: 0x2000, 0x88a: 0x2000, 0x88b: 0x2000,
+	0x88c: 0x2000, 0x88d: 0x2000, 0x88e: 0x2000, 0x88f: 0x2000,
+	0x892: 0x2000, 0x893: 0x2000, 0x894: 0x2000, 0x895: 0x2000,
+	0x8a0: 0x200e, 0x8a1: 0x2000, 0x8a3: 0x2000,
+	0x8a4: 0x2000, 0x8a5: 0x2000, 0x8a6: 0x2000, 0x8a7: 0x2000, 0x8a8: 0x2000, 0x8a9: 0x2000,
+	0x8b2: 0x2000, 0x8b3: 0x2000,
+	0x8b6: 0x2000, 0x8b7: 0x2000,
+	0x8bc: 0x2000, 0x8bd: 0x2000,
+	// Block 0x23, offset 0x8c0
+	0x8c0: 0x2000, 0x8c1: 0x2000,
+	0x8c6: 0x2000, 0x8c7: 0x2000, 0x8c8: 0x2000, 0x8cb: 0x200f,
+	0x8ce: 0x2000, 0x8cf: 0x2000, 0x8d0: 0x2000, 0x8d1: 0x2000,
+	0x8e2: 0x2000, 0x8e3: 0x2000,
+	0x8e4: 0x2000, 0x8e5: 0x2000,
+	0x8ef: 0x2000,
+	0x8fd: 0x4000, 0x8fe: 0x4000,
+	// Block 0x24, offset 0x900
+	0x905: 0x2000,
+	0x906: 0x2000, 0x909: 0x2000,
+	0x90e: 0x2000, 0x90f: 0x2000,
+	0x914: 0x4000, 0x915: 0x4000,
+	0x91c: 0x2000,
+	0x91e: 0x2000,
+	// Block 0x25, offset 0x940
+	0x940: 0x2000, 0x942: 0x2000,
+	0x948: 0x4000, 0x949: 0x4000, 0x94a: 0x4000, 0x94b: 0x4000,
+	0x94c: 0x4000, 0x94d: 0x4000, 0x94e: 0x4000, 0x94f: 0x4000, 0x950: 0x4000, 0x951: 0x4000,
+	0x952: 0x4000, 0x953: 0x4000,
+	0x960: 0x2000, 0x961: 0x2000, 0x963: 0x2000,
+	0x964: 0x2000, 0x965: 0x2000, 0x967: 0x2000, 0x968: 0x2000, 0x969: 0x2000,
+	0x96a: 0x2000, 0x96c: 0x2000, 0x96d: 0x2000, 0x96f: 0x2000,
+	0x97f: 0x4000,
+	// Block 0x26, offset 0x980
+	0x993: 0x4000,
+	0x99e: 0x2000, 0x99f: 0x2000, 0x9a1: 0x4000,
+	0x9aa: 0x4000, 0x9ab: 0x4000,
+	0x9bd: 0x4000, 0x9be: 0x4000, 0x9bf: 0x2000,
+	// Block 0x27, offset 0x9c0
+	0x9c4: 0x4000, 0x9c5: 0x4000,
+	0x9c6: 0x2000, 0x9c7: 0x2000, 0x9c8: 0x2000, 0x9c9: 0x2000, 0x9ca: 0x2000, 0x9cb: 0x2000,
+	0x9cc: 0x2000, 0x9cd: 0x2000, 0x9ce: 0x4000, 0x9cf: 0x2000, 0x9d0: 0x2000, 0x9d1: 0x2000,
+	0x9d2: 0x2000, 0x9d3: 0x2000, 0x9d4: 0x4000, 0x9d5: 0x2000, 0x9d6: 0x2000, 0x9d7: 0x2000,
+	0x9d8: 0x2000, 0x9d9: 0x2000, 0x9da: 0x2000, 0x9db: 0x2000, 0x9dc: 0x2000, 0x9dd: 0x2000,
+	0x9de: 0x2000, 0x9df: 0x2000, 0x9e0: 0x2000, 0x9e1: 0x2000, 0x9e3: 0x2000,
+	0x9e8: 0x2000, 0x9e9: 0x2000,
+	0x9ea: 0x4000, 0x9eb: 0x2000, 0x9ec: 0x2000, 0x9ed: 0x2000, 0x9ee: 0x2000, 0x9ef: 0x2000,
+	0x9f0: 0x2000, 0x9f1: 0x2000, 0x9f2: 0x4000, 0x9f3: 0x4000, 0x9f4: 0x2000, 0x9f5: 0x4000,
+	0x9f6: 0x2000, 0x9f7: 0x2000, 0x9f8: 0x2000, 0x9f9: 0x2000, 0x9fa: 0x4000, 0x9fb: 0x2000,
+	0x9fc: 0x2000, 0x9fd: 0x4000, 0x9fe: 0x2000, 0x9ff: 0x2000,
+	// Block 0x28, offset 0xa00
+	0xa05: 0x4000,
+	0xa0a: 0x4000, 0xa0b: 0x4000,
+	0xa28: 0x4000,
+	0xa3d: 0x2000,
+	// Block 0x29, offset 0xa40
+	0xa4c: 0x4000, 0xa4e: 0x4000,
+	0xa53: 0x4000, 0xa54: 0x4000, 0xa55: 0x4000, 0xa57: 0x4000,
+	0xa76: 0x2000, 0xa77: 0x2000, 0xa78: 0x2000, 0xa79: 0x2000, 0xa7a: 0x2000, 0xa7b: 0x2000,
+	0xa7c: 0x2000, 0xa7d: 0x2000, 0xa7e: 0x2000, 0xa7f: 0x2000,
+	// Block 0x2a, offset 0xa80
+	0xa95: 0x4000, 0xa96: 0x4000, 0xa97: 0x4000,
+	0xab0: 0x4000,
+	0xabf: 0x4000,
+	// Block 0x2b, offset 0xac0
+	0xae6: 0x6000, 0xae7: 0x6000, 0xae8: 0x6000, 0xae9: 0x6000,
+	0xaea: 0x6000, 0xaeb: 0x6000, 0xaec: 0x6000, 0xaed: 0x6000,
+	// Block 0x2c, offset 0xb00
+	0xb05: 0x6010,
+	0xb06: 0x6011,
+	// Block 0x2d, offset 0xb40
+	0xb5b: 0x4000, 0xb5c: 0x4000,
+	// Block 0x2e, offset 0xb80
+	0xb90: 0x4000,
+	0xb95: 0x4000, 0xb96: 0x2000, 0xb97: 0x2000,
+	0xb98: 0x2000, 0xb99: 0x2000,
+	// Block 0x2f, offset 0xbc0
+	0xbc0: 0x4000, 0xbc1: 0x4000, 0xbc2: 0x4000, 0xbc3: 0x4000, 0xbc4: 0x4000, 0xbc5: 0x4000,
+	0xbc6: 0x4000, 0xbc7: 0x4000, 0xbc8: 0x4000, 0xbc9: 0x4000, 0xbca: 0x4000, 0xbcb: 0x4000,
+	0xbcc: 0x4000, 0xbcd: 0x4000, 0xbce: 0x4000, 0xbcf: 0x4000, 0xbd0: 0x4000, 0xbd1: 0x4000,
+	0xbd2: 0x4000, 0xbd3: 0x4000, 0xbd4: 0x4000, 0xbd5: 0x4000, 0xbd6: 0x4000, 0xbd7: 0x4000,
+	0xbd8: 0x4000, 0xbd9: 0x4000, 0xbdb: 0x4000, 0xbdc: 0x4000, 0xbdd: 0x4000,
+	0xbde: 0x4000, 0xbdf: 0x4000, 0xbe0: 0x4000, 0xbe1: 0x4000, 0xbe2: 0x4000, 0xbe3: 0x4000,
+	0xbe4: 0x4000, 0xbe5: 0x4000, 0xbe6: 0x4000, 0xbe7: 0x4000, 0xbe8: 0x4000, 0xbe9: 0x4000,
+	0xbea: 0x4000, 0xbeb: 0x4000, 0xbec: 0x4000, 0xbed: 0x4000, 0xbee: 0x4000, 0xbef: 0x4000,
+	0xbf0: 0x4000, 0xbf1: 0x4000, 0xbf2: 0x4000, 0xbf3: 0x4000, 0xbf4: 0x4000, 0xbf5: 0x4000,
+	0xbf6: 0x4000, 0xbf7: 0x4000, 0xbf8: 0x4000, 0xbf9: 0x4000, 0xbfa: 0x4000, 0xbfb: 0x4000,
+	0xbfc: 0x4000, 0xbfd: 0x4000, 0xbfe: 0x4000, 0xbff: 0x4000,
+	// Block 0x30, offset 0xc00
+	0xc00: 0x4000, 0xc01: 0x4000, 0xc02: 0x4000, 0xc03: 0x4000, 0xc04: 0x4000, 0xc05: 0x4000,
+	0xc06: 0x4000, 0xc07: 0x4000, 0xc08: 0x4000, 0xc09: 0x4000, 0xc0a: 0x4000, 0xc0b: 0x4000,
+	0xc0c: 0x4000, 0xc0d: 0x4000, 0xc0e: 0x4000, 0xc0f: 0x4000, 0xc10: 0x4000, 0xc11: 0x4000,
+	0xc12: 0x4000, 0xc13: 0x4000, 0xc14: 0x4000, 0xc15: 0x4000, 0xc16: 0x4000, 0xc17: 0x4000,
+	0xc18: 0x4000, 0xc19: 0x4000, 0xc1a: 0x4000, 0xc1b: 0x4000, 0xc1c: 0x4000, 0xc1d: 0x4000,
+	0xc1e: 0x4000, 0xc1f: 0x4000, 0xc20: 0x4000, 0xc21: 0x4000, 0xc22: 0x4000, 0xc23: 0x4000,
+	0xc24: 0x4000, 0xc25: 0x4000, 0xc26: 0x4000, 0xc27: 0x4000, 0xc28: 0x4000, 0xc29: 0x4000,
+	0xc2a: 0x4000, 0xc2b: 0x4000, 0xc2c: 0x4000, 0xc2d: 0x4000, 0xc2e: 0x4000, 0xc2f: 0x4000,
+	0xc30: 0x4000, 0xc31: 0x4000, 0xc32: 0x4000, 0xc33: 0x4000,
+	// Block 0x31, offset 0xc40
+	0xc40: 0x4000, 0xc41: 0x4000, 0xc42: 0x4000, 0xc43: 0x4000, 0xc44: 0x4000, 0xc45: 0x4000,
+	0xc46: 0x4000, 0xc47: 0x4000, 0xc48: 0x4000, 0xc49: 0x4000, 0xc4a: 0x4000, 0xc4b: 0x4000,
+	0xc4c: 0x4000, 0xc4d: 0x4000, 0xc4e: 0x4000, 0xc4f: 0x4000, 0xc50: 0x4000, 0xc51: 0x4000,
+	0xc52: 0x4000, 0xc53: 0x4000, 0xc54: 0x4000, 0xc55: 0x4000,
+	0xc70: 0x4000, 0xc71: 0x4000, 0xc72: 0x4000, 0xc73: 0x4000, 0xc74: 0x4000, 0xc75: 0x4000,
+	0xc76: 0x4000, 0xc77: 0x4000, 0xc78: 0x4000, 0xc79: 0x4000, 0xc7a: 0x4000, 0xc7b: 0x4000,
+	// Block 0x32, offset 0xc80
+	0xc80: 0x9012, 0xc81: 0x4013, 0xc82: 0x4014, 0xc83: 0x4000, 0xc84: 0x4000, 0xc85: 0x4000,
+	0xc86: 0x4000, 0xc87: 0x4000, 0xc88: 0x4000, 0xc89: 0x4000, 0xc8a: 0x4000, 0xc8b: 0x4000,
+	0xc8c: 0x4015, 0xc8d: 0x4015, 0xc8e: 0x4000, 0xc8f: 0x4000, 0xc90: 0x4000, 0xc91: 0x4000,
+	0xc92: 0x4000, 0xc93: 0x4000, 0xc94: 0x4000, 0xc95: 0x4000, 0xc96: 0x4000, 0xc97: 0x4000,
+	0xc98: 0x4000, 0xc99: 0x4000, 0xc9a: 0x4000, 0xc9b: 0x4000, 0xc9c: 0x4000, 0xc9d: 0x4000,
+	0xc9e: 0x4000, 0xc9f: 0x4000, 0xca0: 0x4000, 0xca1: 0x4000, 0xca2: 0x4000, 0xca3: 0x4000,
+	0xca4: 0x4000, 0xca5: 0x4000, 0xca6: 0x4000, 0xca7: 0x4000, 0xca8: 0x4000, 0xca9: 0x4000,
+	0xcaa: 0x4000, 0xcab: 0x4000, 0xcac: 0x4000, 0xcad: 0x4000, 0xcae: 0x4000, 0xcaf: 0x4000,
+	0xcb0: 0x4000, 0xcb1: 0x4000, 0xcb2: 0x4000, 0xcb3: 0x4000, 0xcb4: 0x4000, 0xcb5: 0x4000,
+	0xcb6: 0x4000, 0xcb7: 0x4000, 0xcb8: 0x4000, 0xcb9: 0x4000, 0xcba: 0x4000, 0xcbb: 0x4000,
+	0xcbc: 0x4000, 0xcbd: 0x4000, 0xcbe: 0x4000,
+	// Block 0x33, offset 0xcc0
+	0xcc1: 0x4000, 0xcc2: 0x4000, 0xcc3: 0x4000, 0xcc4: 0x4000, 0xcc5: 0x4000,
+	0xcc6: 0x4000, 0xcc7: 0x4000, 0xcc8: 0x4000, 0xcc9: 0x4000, 0xcca: 0x4000, 0xccb: 0x4000,
+	0xccc: 0x4000, 0xccd: 0x4000, 0xcce: 0x4000, 0xccf: 0x4000, 0xcd0: 0x4000, 0xcd1: 0x4000,
+	0xcd2: 0x4000, 0xcd3: 0x4000, 0xcd4: 0x4000, 0xcd5: 0x4000, 0xcd6: 0x4000, 0xcd7: 0x4000,
+	0xcd8: 0x4000, 0xcd9: 0x4000, 0xcda: 0x4000, 0xcdb: 0x4000, 0xcdc: 0x4000, 0xcdd: 0x4000,
+	0xcde: 0x4000, 0xcdf: 0x4000, 0xce0: 0x4000, 0xce1: 0x4000, 0xce2: 0x4000, 0xce3: 0x4000,
+	0xce4: 0x4000, 0xce5: 0x4000, 0xce6: 0x4000, 0xce7: 0x4000, 0xce8: 0x4000, 0xce9: 0x4000,
+	0xcea: 0x4000, 0xceb: 0x4000, 0xcec: 0x4000, 0xced: 0x4000, 0xcee: 0x4000, 0xcef: 0x4000,
+	0xcf0: 0x4000, 0xcf1: 0x4000, 0xcf2: 0x4000, 0xcf3: 0x4000, 0xcf4: 0x4000, 0xcf5: 0x4000,
+	0xcf6: 0x4000, 0xcf7: 0x4000, 0xcf8: 0x4000, 0xcf9: 0x4000, 0xcfa: 0x4000, 0xcfb: 0x4000,
+	0xcfc: 0x4000, 0xcfd: 0x4000, 0xcfe: 0x4000, 0xcff: 0x4000,
+	// Block 0x34, offset 0xd00
+	0xd00: 0x4000, 0xd01: 0x4000, 0xd02: 0x4000, 0xd03: 0x4000, 0xd04: 0x4000, 0xd05: 0x4000,
+	0xd06: 0x4000, 0xd07: 0x4000, 0xd08: 0x4000, 0xd09: 0x4000, 0xd0a: 0x4000, 0xd0b: 0x4000,
+	0xd0c: 0x4000, 0xd0d: 0x4000, 0xd0e: 0x4000, 0xd0f: 0x4000, 0xd10: 0x4000, 0xd11: 0x4000,
+	0xd12: 0x4000, 0xd13: 0x4000, 0xd14: 0x4000, 0xd15: 0x4000, 0xd16: 0x4000,
+	0xd19: 0x4016, 0xd1a: 0x4017, 0xd1b: 0x4000, 0xd1c: 0x4000, 0xd1d: 0x4000,
+	0xd1e: 0x4000, 0xd1f: 0x4000, 0xd20: 0x4000, 0xd21: 0x4018, 0xd22: 0x4019, 0xd23: 0x401a,
+	0xd24: 0x401b, 0xd25: 0x401c, 0xd26: 0x401d, 0xd27: 0x401e, 0xd28: 0x401f, 0xd29: 0x4020,
+	0xd2a: 0x4021, 0xd2b: 0x4022, 0xd2c: 0x4000, 0xd2d: 0x4010, 0xd2e: 0x4000, 0xd2f: 0x4023,
+	0xd30: 0x4000, 0xd31: 0x4024, 0xd32: 0x4000, 0xd33: 0x4025, 0xd34: 0x4000, 0xd35: 0x4026,
+	0xd36: 0x4000, 0xd37: 0x401a, 0xd38: 0x4000, 0xd39: 0x4027, 0xd3a: 0x4000, 0xd3b: 0x4028,
+	0xd3c: 0x4000, 0xd3d: 0x4020, 0xd3e: 0x4000, 0xd3f: 0x4029,
+	// Block 0x35, offset 0xd40
+	0xd40: 0x4000, 0xd41: 0x402a, 0xd42: 0x4000, 0xd43: 0x402b, 0xd44: 0x402c, 0xd45: 0x4000,
+	0xd46: 0x4017, 0xd47: 0x4000, 0xd48: 0x402d, 0xd49: 0x4000, 0xd4a: 0x402e, 0xd4b: 0x402f,
+	0xd4c: 0x4030, 0xd4d: 0x4017, 0xd4e: 0x4016, 0xd4f: 0x4017, 0xd50: 0x4000, 0xd51: 0x4000,
+	0xd52: 0x4031, 0xd53: 0x4000, 0xd54: 0x4000, 0xd55: 0x4031, 0xd56: 0x4000, 0xd57: 0x4000,
+	0xd58: 0x4032, 0xd59: 0x4000, 0xd5a: 0x4000, 0xd5b: 0x4032, 0xd5c: 0x4000, 0xd5d: 0x4000,
+	0xd5e: 0x4033, 0xd5f: 0x402e, 0xd60: 0x4034, 0xd61: 0x4035, 0xd62: 0x4034, 0xd63: 0x4036,
+	0xd64: 0x4037, 0xd65: 0x4024, 0xd66: 0x4035, 0xd67: 0x4025, 0xd68: 0x4038, 0xd69: 0x4038,
+	0xd6a: 0x4039, 0xd6b: 0x4039, 0xd6c: 0x403a, 0xd6d: 0x403a, 0xd6e: 0x4000, 0xd6f: 0x4035,
+	0xd70: 0x4000, 0xd71: 0x4000, 0xd72: 0x403b, 0xd73: 0x403c, 0xd74: 0x4000, 0xd75: 0x4000,
+	0xd76: 0x4000, 0xd77: 0x4000, 0xd78: 0x4000, 0xd79: 0x4000, 0xd7a: 0x4000, 0xd7b: 0x403d,
+	0xd7c: 0x401c, 0xd7d: 0x4000, 0xd7e: 0x4000, 0xd7f: 0x4000,
+	// Block 0x36, offset 0xd80
+	0xd85: 0x4000,
+	0xd86: 0x4000, 0xd87: 0x4000, 0xd88: 0x4000, 0xd89: 0x4000, 0xd8a: 0x4000, 0xd8b: 0x4000,
+	0xd8c: 0x4000, 0xd8d: 0x4000, 0xd8e: 0x4000, 0xd8f: 0x4000, 0xd90: 0x4000, 0xd91: 0x4000,
+	0xd92: 0x4000, 0xd93: 0x4000, 0xd94: 0x4000, 0xd95: 0x4000, 0xd96: 0x4000, 0xd97: 0x4000,
+	0xd98: 0x4000, 0xd99: 0x4000, 0xd9a: 0x4000, 0xd9b: 0x4000, 0xd9c: 0x4000, 0xd9d: 0x4000,
+	0xd9e: 0x4000, 0xd9f: 0x4000, 0xda0: 0x4000, 0xda1: 0x4000, 0xda2: 0x4000, 0xda3: 0x4000,
+	0xda4: 0x4000, 0xda5: 0x4000, 0xda6: 0x4000, 0xda7: 0x4000, 0xda8: 0x4000, 0xda9: 0x4000,
+	0xdaa: 0x4000, 0xdab: 0x4000, 0xdac: 0x4000, 0xdad: 0x4000,
+	0xdb1: 0x403e, 0xdb2: 0x403e, 0xdb3: 0x403e, 0xdb4: 0x403e, 0xdb5: 0x403e,
+	0xdb6: 0x403e, 0xdb7: 0x403e, 0xdb8: 0x403e, 0xdb9: 0x403e, 0xdba: 0x403e, 0xdbb: 0x403e,
+	0xdbc: 0x403e, 0xdbd: 0x403e, 0xdbe: 0x403e, 0xdbf: 0x403e,
+	// Block 0x37, offset 0xdc0
+	0xdc0: 0x4037, 0xdc1: 0x4037, 0xdc2: 0x4037, 0xdc3: 0x4037, 0xdc4: 0x4037, 0xdc5: 0x4037,
+	0xdc6: 0x4037, 0xdc7: 0x4037, 0xdc8: 0x4037, 0xdc9: 0x4037, 0xdca: 0x4037, 0xdcb: 0x4037,
+	0xdcc: 0x4037, 0xdcd: 0x4037, 0xdce: 0x4037, 0xdcf: 0x400e, 0xdd0: 0x403f, 0xdd1: 0x4040,
+	0xdd2: 0x4041, 0xdd3: 0x4040, 0xdd4: 0x403f, 0xdd5: 0x4042, 0xdd6: 0x4043, 0xdd7: 0x4044,
+	0xdd8: 0x4040, 0xdd9: 0x4041, 0xdda: 0x4040, 0xddb: 0x4045, 0xddc: 0x4009, 0xddd: 0x4045,
+	0xdde: 0x4046, 0xddf: 0x4045, 0xde0: 0x4047, 0xde1: 0x400b, 0xde2: 0x400a, 0xde3: 0x400c,
+	0xde4: 0x4048, 0xde5: 0x4000, 0xde6: 0x4000, 0xde7: 0x4000, 0xde8: 0x4000, 0xde9: 0x4000,
+	0xdea: 0x4000, 0xdeb: 0x4000, 0xdec: 0x4000, 0xded: 0x4000, 0xdee: 0x4000, 0xdef: 0x4000,
+	0xdf0: 0x4000, 0xdf1: 0x4000, 0xdf2: 0x4000, 0xdf3: 0x4000, 0xdf4: 0x4000, 0xdf5: 0x4000,
+	0xdf6: 0x4000, 0xdf7: 0x4000, 0xdf8: 0x4000, 0xdf9: 0x4000, 0xdfa: 0x4000, 0xdfb: 0x4000,
+	0xdfc: 0x4000, 0xdfd: 0x4000, 0xdfe: 0x4000, 0xdff: 0x4000,
+	// Block 0x38, offset 0xe00
+	0xe00: 0x4000, 0xe01: 0x4000, 0xe02: 0x4000, 0xe03: 0x4000, 0xe04: 0x4000, 0xe05: 0x4000,
+	0xe06: 0x4000, 0xe07: 0x4000, 0xe08: 0x4000, 0xe09: 0x4000, 0xe0a: 0x4000, 0xe0b: 0x4000,
+	0xe0c: 0x4000, 0xe0d: 0x4000, 0xe0e: 0x4000, 0xe10: 0x4000, 0xe11: 0x4000,
+	0xe12: 0x4000, 0xe13: 0x4000, 0xe14: 0x4000, 0xe15: 0x4000, 0xe16: 0x4000, 0xe17: 0x4000,
+	0xe18: 0x4000, 0xe19: 0x4000, 0xe1a: 0x4000, 0xe1b: 0x4000, 0xe1c: 0x4000, 0xe1d: 0x4000,
+	0xe1e: 0x4000, 0xe1f: 0x4000, 0xe20: 0x4000, 0xe21: 0x4000, 0xe22: 0x4000, 0xe23: 0x4000,
+	0xe24: 0x4000, 0xe25: 0x4000, 0xe26: 0x4000, 0xe27: 0x4000, 0xe28: 0x4000, 0xe29: 0x4000,
+	0xe2a: 0x4000, 0xe2b: 0x4000, 0xe2c: 0x4000, 0xe2d: 0x4000, 0xe2e: 0x4000, 0xe2f: 0x4000,
+	0xe30: 0x4000, 0xe31: 0x4000, 0xe32: 0x4000, 0xe33: 0x4000, 0xe34: 0x4000, 0xe35: 0x4000,
+	0xe36: 0x4000, 0xe37: 0x4000, 0xe38: 0x4000, 0xe39: 0x4000, 0xe3a: 0x4000,
+	// Block 0x39, offset 0xe40
+	0xe40: 0x4000, 0xe41: 0x4000, 0xe42: 0x4000, 0xe43: 0x4000, 0xe44: 0x4000, 0xe45: 0x4000,
+	0xe46: 0x4000, 0xe47: 0x4000, 0xe48: 0x4000, 0xe49: 0x4000, 0xe4a: 0x4000, 0xe4b: 0x4000,
+	0xe4c: 0x4000, 0xe4d: 0x4000, 0xe4e: 0x4000, 0xe4f: 0x4000, 0xe50: 0x4000, 0xe51: 0x4000,
+	0xe52: 0x4000, 0xe53: 0x4000, 0xe54: 0x4000, 0xe55: 0x4000, 0xe56: 0x4000, 0xe57: 0x4000,
+	0xe58: 0x4000, 0xe59: 0x4000, 0xe5a: 0x4000, 0xe5b: 0x4000, 0xe5c: 0x4000, 0xe5d: 0x4000,
+	0xe5e: 0x4000, 0xe5f: 0x4000, 0xe60: 0x4000, 0xe61: 0x4000, 0xe62: 0x4000, 0xe63: 0x4000,
+	0xe70: 0x4000, 0xe71: 0x4000, 0xe72: 0x4000, 0xe73: 0x4000, 0xe74: 0x4000, 0xe75: 0x4000,
+	0xe76: 0x4000, 0xe77: 0x4000, 0xe78: 0x4000, 0xe79: 0x4000, 0xe7a: 0x4000, 0xe7b: 0x4000,
+	0xe7c: 0x4000, 0xe7d: 0x4000, 0xe7e: 0x4000, 0xe7f: 0x4000,
+	// Block 0x3a, offset 0xe80
+	0xe80: 0x4000, 0xe81: 0x4000, 0xe82: 0x4000, 0xe83: 0x4000, 0xe84: 0x4000, 0xe85: 0x4000,
+	0xe86: 0x4000, 0xe87: 0x4000, 0xe88: 0x4000, 0xe89: 0x4000, 0xe8a: 0x4000, 0xe8b: 0x4000,
+	0xe8c: 0x4000, 0xe8d: 0x4000, 0xe8e: 0x4000, 0xe8f: 0x4000, 0xe90: 0x4000, 0xe91: 0x4000,
+	0xe92: 0x4000, 0xe93: 0x4000, 0xe94: 0x4000, 0xe95: 0x4000, 0xe96: 0x4000, 0xe97: 0x4000,
+	0xe98: 0x4000, 0xe99: 0x4000, 0xe9a: 0x4000, 0xe9b: 0x4000, 0xe9c: 0x4000, 0xe9d: 0x4000,
+	0xe9e: 0x4000, 0xea0: 0x4000, 0xea1: 0x4000, 0xea2: 0x4000, 0xea3: 0x4000,
+	0xea4: 0x4000, 0xea5: 0x4000, 0xea6: 0x4000, 0xea7: 0x4000, 0xea8: 0x4000, 0xea9: 0x4000,
+	0xeaa: 0x4000, 0xeab: 0x4000, 0xeac: 0x4000, 0xead: 0x4000, 0xeae: 0x4000, 0xeaf: 0x4000,
+	0xeb0: 0x4000, 0xeb1: 0x4000, 0xeb2: 0x4000, 0xeb3: 0x4000, 0xeb4: 0x4000, 0xeb5: 0x4000,
+	0xeb6: 0x4000, 0xeb7: 0x4000, 0xeb8: 0x4000, 0xeb9: 0x4000, 0xeba: 0x4000, 0xebb: 0x4000,
+	0xebc: 0x4000, 0xebd: 0x4000, 0xebe: 0x4000, 0xebf: 0x4000,
+	// Block 0x3b, offset 0xec0
+	0xec0: 0x4000, 0xec1: 0x4000, 0xec2: 0x4000, 0xec3: 0x4000, 0xec4: 0x4000, 0xec5: 0x4000,
+	0xec6: 0x4000, 0xec7: 0x4000, 0xec8: 0x2000, 0xec9: 0x2000, 0xeca: 0x2000, 0xecb: 0x2000,
+	0xecc: 0x2000, 0xecd: 0x2000, 0xece: 0x2000, 0xecf: 0x2000, 0xed0: 0x4000, 0xed1: 0x4000,
+	0xed2: 0x4000, 0xed3: 0x4000, 0xed4: 0x4000, 0xed5: 0x4000, 0xed6: 0x4000, 0xed7: 0x4000,
+	0xed8: 0x4000, 0xed9: 0x4000, 0xeda: 0x4000, 0xedb: 0x4000, 0xedc: 0x4000, 0xedd: 0x4000,
+	0xede: 0x4000, 0xedf: 0x4000, 0xee0: 0x4000, 0xee1: 0x4000, 0xee2: 0x4000, 0xee3: 0x4000,
+	0xee4: 0x4000, 0xee5: 0x4000, 0xee6: 0x4000, 0xee7: 0x4000, 0xee8: 0x4000, 0xee9: 0x4000,
+	0xeea: 0x4000, 0xeeb: 0x4000, 0xeec: 0x4000, 0xeed: 0x4000, 0xeee: 0x4000, 0xeef: 0x4000,
+	0xef0: 0x4000, 0xef1: 0x4000, 0xef2: 0x4000, 0xef3: 0x4000, 0xef4: 0x4000, 0xef5: 0x4000,
+	0xef6: 0x4000, 0xef7: 0x4000, 0xef8: 0x4000, 0xef9: 0x4000, 0xefa: 0x4000, 0xefb: 0x4000,
+	0xefc: 0x4000, 0xefd: 0x4000, 0xefe: 0x4000, 0xeff: 0x4000,
+	// Block 0x3c, offset 0xf00
+	0xf00: 0x4000, 0xf01: 0x4000, 0xf02: 0x4000, 0xf03: 0x4000, 0xf04: 0x4000, 0xf05: 0x4000,
+	0xf06: 0x4000, 0xf07: 0x4000, 0xf08: 0x4000, 0xf09: 0x4000, 0xf0a: 0x4000, 0xf0b: 0x4000,
+	0xf0c: 0x4000, 0xf0d: 0x4000, 0xf0e: 0x4000, 0xf0f: 0x4000, 0xf10: 0x4000, 0xf11: 0x4000,
+	0xf12: 0x4000, 0xf13: 0x4000, 0xf14: 0x4000, 0xf15: 0x4000, 0xf16: 0x4000, 0xf17: 0x4000,
+	0xf18: 0x4000, 0xf19: 0x4000, 0xf1a: 0x4000, 0xf1b: 0x4000, 0xf1c: 0x4000, 0xf1d: 0x4000,
+	0xf1e: 0x4000, 0xf1f: 0x4000, 0xf20: 0x4000, 0xf21: 0x4000, 0xf22: 0x4000, 0xf23: 0x4000,
+	0xf24: 0x4000, 0xf25: 0x4000, 0xf26: 0x4000, 0xf27: 0x4000, 0xf28: 0x4000, 0xf29: 0x4000,
+	0xf2a: 0x4000, 0xf2b: 0x4000, 0xf2c: 0x4000, 0xf2d: 0x4000, 0xf2e: 0x4000, 0xf2f: 0x4000,
+	0xf30: 0x4000, 0xf31: 0x4000, 0xf32: 0x4000, 0xf33: 0x4000, 0xf34: 0x4000, 0xf35: 0x4000,
+	0xf36: 0x4000, 0xf37: 0x4000, 0xf38: 0x4000, 0xf39: 0x4000, 0xf3a: 0x4000, 0xf3b: 0x4000,
+	0xf3c: 0x4000, 0xf3d: 0x4000, 0xf3e: 0x4000,
+	// Block 0x3d, offset 0xf40
+	0xf40: 0x4000, 0xf41: 0x4000, 0xf42: 0x4000, 0xf43: 0x4000, 0xf44: 0x4000, 0xf45: 0x4000,
+	0xf46: 0x4000, 0xf47: 0x4000, 0xf48: 0x4000, 0xf49: 0x4000, 0xf4a: 0x4000, 0xf4b: 0x4000,
+	0xf4c: 0x4000, 0xf50: 0x4000, 0xf51: 0x4000,
+	0xf52: 0x4000, 0xf53: 0x4000, 0xf54: 0x4000, 0xf55: 0x4000, 0xf56: 0x4000, 0xf57: 0x4000,
+	0xf58: 0x4000, 0xf59: 0x4000, 0xf5a: 0x4000, 0xf5b: 0x4000, 0xf5c: 0x4000, 0xf5d: 0x4000,
+	0xf5e: 0x4000, 0xf5f: 0x4000, 0xf60: 0x4000, 0xf61: 0x4000, 0xf62: 0x4000, 0xf63: 0x4000,
+	0xf64: 0x4000, 0xf65: 0x4000, 0xf66: 0x4000, 0xf67: 0x4000, 0xf68: 0x4000, 0xf69: 0x4000,
+	0xf6a: 0x4000, 0xf6b: 0x4000, 0xf6c: 0x4000, 0xf6d: 0x4000, 0xf6e: 0x4000, 0xf6f: 0x4000,
+	0xf70: 0x4000, 0xf71: 0x4000, 0xf72: 0x4000, 0xf73: 0x4000, 0xf74: 0x4000, 0xf75: 0x4000,
+	0xf76: 0x4000, 0xf77: 0x4000, 0xf78: 0x4000, 0xf79: 0x4000, 0xf7a: 0x4000, 0xf7b: 0x4000,
+	0xf7c: 0x4000, 0xf7d: 0x4000, 0xf7e: 0x4000, 0xf7f: 0x4000,
+	// Block 0x3e, offset 0xf80
+	0xf80: 0x4000, 0xf81: 0x4000, 0xf82: 0x4000, 0xf83: 0x4000, 0xf84: 0x4000, 0xf85: 0x4000,
+	0xf86: 0x4000,
+	// Block 0x3f, offset 0xfc0
+	0xfe0: 0x4000, 0xfe1: 0x4000, 0xfe2: 0x4000, 0xfe3: 0x4000,
+	0xfe4: 0x4000, 0xfe5: 0x4000, 0xfe6: 0x4000, 0xfe7: 0x4000, 0xfe8: 0x4000, 0xfe9: 0x4000,
+	0xfea: 0x4000, 0xfeb: 0x4000, 0xfec: 0x4000, 0xfed: 0x4000, 0xfee: 0x4000, 0xfef: 0x4000,
+	0xff0: 0x4000, 0xff1: 0x4000, 0xff2: 0x4000, 0xff3: 0x4000, 0xff4: 0x4000, 0xff5: 0x4000,
+	0xff6: 0x4000, 0xff7: 0x4000, 0xff8: 0x4000, 0xff9: 0x4000, 0xffa: 0x4000, 0xffb: 0x4000,
+	0xffc: 0x4000,
+	// Block 0x40, offset 0x1000
+	0x1000: 0x4000, 0x1001: 0x4000, 0x1002: 0x4000, 0x1003: 0x4000, 0x1004: 0x4000, 0x1005: 0x4000,
+	0x1006: 0x4000, 0x1007: 0x4000, 0x1008: 0x4000, 0x1009: 0x4000, 0x100a: 0x4000, 0x100b: 0x4000,
+	0x100c: 0x4000, 0x100d: 0x4000, 0x100e: 0x4000, 0x100f: 0x4000, 0x1010: 0x4000, 0x1011: 0x4000,
+	0x1012: 0x4000, 0x1013: 0x4000, 0x1014: 0x4000, 0x1015: 0x4000, 0x1016: 0x4000, 0x1017: 0x4000,
+	0x1018: 0x4000, 0x1019: 0x4000, 0x101a: 0x4000, 0x101b: 0x4000, 0x101c: 0x4000, 0x101d: 0x4000,
+	0x101e: 0x4000, 0x101f: 0x4000, 0x1020: 0x4000, 0x1021: 0x4000, 0x1022: 0x4000, 0x1023: 0x4000,
+	// Block 0x41, offset 0x1040
+	0x1040: 0x2000, 0x1041: 0x2000, 0x1042: 0x2000, 0x1043: 0x2000, 0x1044: 0x2000, 0x1045: 0x2000,
+	0x1046: 0x2000, 0x1047: 0x2000, 0x1048: 0x2000, 0x1049: 0x2000, 0x104a: 0x2000, 0x104b: 0x2000,
+	0x104c: 0x2000, 0x104d: 0x2000, 0x104e: 0x2000, 0x104f: 0x2000, 0x1050: 0x4000, 0x1051: 0x4000,
+	0x1052: 0x4000, 0x1053: 0x4000, 0x1054: 0x4000, 0x1055: 0x4000, 0x1056: 0x4000, 0x1057: 0x4000,
+	0x1058: 0x4000, 0x1059: 0x4000,
+	0x1070: 0x4000, 0x1071: 0x4000, 0x1072: 0x4000, 0x1073: 0x4000, 0x1074: 0x4000, 0x1075: 0x4000,
+	0x1076: 0x4000, 0x1077: 0x4000, 0x1078: 0x4000, 0x1079: 0x4000, 0x107a: 0x4000, 0x107b: 0x4000,
+	0x107c: 0x4000, 0x107d: 0x4000, 0x107e: 0x4000, 0x107f: 0x4000,
+	// Block 0x42, offset 0x1080
+	0x1080: 0x4000, 0x1081: 0x4000, 0x1082: 0x4000, 0x1083: 0x4000, 0x1084: 0x4000, 0x1085: 0x4000,
+	0x1086: 0x4000, 0x1087: 0x4000, 0x1088: 0x4000, 0x1089: 0x4000, 0x108a: 0x4000, 0x108b: 0x4000,
+	0x108c: 0x4000, 0x108d: 0x4000, 0x108e: 0x4000, 0x108f: 0x4000, 0x1090: 0x4000, 0x1091: 0x4000,
+	0x1092: 0x4000, 0x1094: 0x4000, 0x1095: 0x4000, 0x1096: 0x4000, 0x1097: 0x4000,
+	0x1098: 0x4000, 0x1099: 0x4000, 0x109a: 0x4000, 0x109b: 0x4000, 0x109c: 0x4000, 0x109d: 0x4000,
+	0x109e: 0x4000, 0x109f: 0x4000, 0x10a0: 0x4000, 0x10a1: 0x4000, 0x10a2: 0x4000, 0x10a3: 0x4000,
+	0x10a4: 0x4000, 0x10a5: 0x4000, 0x10a6: 0x4000, 0x10a8: 0x4000, 0x10a9: 0x4000,
+	0x10aa: 0x4000, 0x10ab: 0x4000,
+	// Block 0x43, offset 0x10c0
+	0x10c1: 0x9012, 0x10c2: 0x9012, 0x10c3: 0x9012, 0x10c4: 0x9012, 0x10c5: 0x9012,
+	0x10c6: 0x9012, 0x10c7: 0x9012, 0x10c8: 0x9012, 0x10c9: 0x9012, 0x10ca: 0x9012, 0x10cb: 0x9012,
+	0x10cc: 0x9012, 0x10cd: 0x9012, 0x10ce: 0x9012, 0x10cf: 0x9012, 0x10d0: 0x9012, 0x10d1: 0x9012,
+	0x10d2: 0x9012, 0x10d3: 0x9012, 0x10d4: 0x9012, 0x10d5: 0x9012, 0x10d6: 0x9012, 0x10d7: 0x9012,
+	0x10d8: 0x9012, 0x10d9: 0x9012, 0x10da: 0x9012, 0x10db: 0x9012, 0x10dc: 0x9012, 0x10dd: 0x9012,
+	0x10de: 0x9012, 0x10df: 0x9012, 0x10e0: 0x9049, 0x10e1: 0x9049, 0x10e2: 0x9049, 0x10e3: 0x9049,
+	0x10e4: 0x9049, 0x10e5: 0x9049, 0x10e6: 0x9049, 0x10e7: 0x9049, 0x10e8: 0x9049, 0x10e9: 0x9049,
+	0x10ea: 0x9049, 0x10eb: 0x9049, 0x10ec: 0x9049, 0x10ed: 0x9049, 0x10ee: 0x9049, 0x10ef: 0x9049,
+	0x10f0: 0x9049, 0x10f1: 0x9049, 0x10f2: 0x9049, 0x10f3: 0x9049, 0x10f4: 0x9049, 0x10f5: 0x9049,
+	0x10f6: 0x9049, 0x10f7: 0x9049, 0x10f8: 0x9049, 0x10f9: 0x9049, 0x10fa: 0x9049, 0x10fb: 0x9049,
+	0x10fc: 0x9049, 0x10fd: 0x9049, 0x10fe: 0x9049, 0x10ff: 0x9049,
+	// Block 0x44, offset 0x1100
+	0x1100: 0x9049, 0x1101: 0x9049, 0x1102: 0x9049, 0x1103: 0x9049, 0x1104: 0x9049, 0x1105: 0x9049,
+	0x1106: 0x9049, 0x1107: 0x9049, 0x1108: 0x9049, 0x1109: 0x9049, 0x110a: 0x9049, 0x110b: 0x9049,
+	0x110c: 0x9049, 0x110d: 0x9049, 0x110e: 0x9049, 0x110f: 0x9049, 0x1110: 0x9049, 0x1111: 0x9049,
+	0x1112: 0x9049, 0x1113: 0x9049, 0x1114: 0x9049, 0x1115: 0x9049, 0x1116: 0x9049, 0x1117: 0x9049,
+	0x1118: 0x9049, 0x1119: 0x9049, 0x111a: 0x9049, 0x111b: 0x9049, 0x111c: 0x9049, 0x111d: 0x9049,
+	0x111e: 0x9049, 0x111f: 0x904a, 0x1120: 0x904b, 0x1121: 0xb04c, 0x1122: 0xb04d, 0x1123: 0xb04d,
+	0x1124: 0xb04e, 0x1125: 0xb04f, 0x1126: 0xb050, 0x1127: 0xb051, 0x1128: 0xb052, 0x1129: 0xb053,
+	0x112a: 0xb054, 0x112b: 0xb055, 0x112c: 0xb056, 0x112d: 0xb057, 0x112e: 0xb058, 0x112f: 0xb059,
+	0x1130: 0xb05a, 0x1131: 0xb05b, 0x1132: 0xb05c, 0x1133: 0xb05d, 0x1134: 0xb05e, 0x1135: 0xb05f,
+	0x1136: 0xb060, 0x1137: 0xb061, 0x1138: 0xb062, 0x1139: 0xb063, 0x113a: 0xb064, 0x113b: 0xb065,
+	0x113c: 0xb052, 0x113d: 0xb066, 0x113e: 0xb067, 0x113f: 0xb055,
+	// Block 0x45, offset 0x1140
+	0x1140: 0xb068, 0x1141: 0xb069, 0x1142: 0xb06a, 0x1143: 0xb06b, 0x1144: 0xb05a, 0x1145: 0xb056,
+	0x1146: 0xb06c, 0x1147: 0xb06d, 0x1148: 0xb06b, 0x1149: 0xb06e, 0x114a: 0xb06b, 0x114b: 0xb06f,
+	0x114c: 0xb06f, 0x114d: 0xb070, 0x114e: 0xb070, 0x114f: 0xb071, 0x1150: 0xb056, 0x1151: 0xb072,
+	0x1152: 0xb073, 0x1153: 0xb072, 0x1154: 0xb074, 0x1155: 0xb073, 0x1156: 0xb075, 0x1157: 0xb075,
+	0x1158: 0xb076, 0x1159: 0xb076, 0x115a: 0xb077, 0x115b: 0xb077, 0x115c: 0xb073, 0x115d: 0xb078,
+	0x115e: 0xb079, 0x115f: 0xb067, 0x1160: 0xb07a, 0x1161: 0xb07b, 0x1162: 0xb07b, 0x1163: 0xb07b,
+	0x1164: 0xb07b, 0x1165: 0xb07b, 0x1166: 0xb07b, 0x1167: 0xb07b, 0x1168: 0xb07b, 0x1169: 0xb07b,
+	0x116a: 0xb07b, 0x116b: 0xb07b, 0x116c: 0xb07b, 0x116d: 0xb07b, 0x116e: 0xb07b, 0x116f: 0xb07b,
+	0x1170: 0xb07c, 0x1171: 0xb07c, 0x1172: 0xb07c, 0x1173: 0xb07c, 0x1174: 0xb07c, 0x1175: 0xb07c,
+	0x1176: 0xb07c, 0x1177: 0xb07c, 0x1178: 0xb07c, 0x1179: 0xb07c, 0x117a: 0xb07c, 0x117b: 0xb07c,
+	0x117c: 0xb07c, 0x117d: 0xb07c, 0x117e: 0xb07c,
+	// Block 0x46, offset 0x1180
+	0x1182: 0xb07d, 0x1183: 0xb07e, 0x1184: 0xb07f, 0x1185: 0xb080,
+	0x1186: 0xb07f, 0x1187: 0xb07e, 0x118a: 0xb081, 0x118b: 0xb082,
+	0x118c: 0xb083, 0x118d: 0xb07f, 0x118e: 0xb080, 0x118f: 0xb07f,
+	0x1192: 0xb084, 0x1193: 0xb085, 0x1194: 0xb084, 0x1195: 0xb086, 0x1196: 0xb084, 0x1197: 0xb087,
+	0x119a: 0xb088, 0x119b: 0xb089, 0x119c: 0xb08a,
+	0x11a0: 0x908b, 0x11a1: 0x908b, 0x11a2: 0x908c, 0x11a3: 0x908d,
+	0x11a4: 0x908b, 0x11a5: 0x908e, 0x11a6: 0x908f, 0x11a8: 0xb090, 0x11a9: 0xb091,
+	0x11aa: 0xb092, 0x11ab: 0xb091, 0x11ac: 0xb093, 0x11ad: 0xb094, 0x11ae: 0xb095,
+	0x11bd: 0x2000,
+	// Block 0x47, offset 0x11c0
+	0x11e0: 0x4000,
+	// Block 0x48, offset 0x1200
+	0x1200: 0x4000, 0x1201: 0x4000, 0x1202: 0x4000, 0x1203: 0x4000, 0x1204: 0x4000, 0x1205: 0x4000,
+	0x1206: 0x4000, 0x1207: 0x4000, 0x1208: 0x4000, 0x1209: 0x4000, 0x120a: 0x4000, 0x120b: 0x4000,
+	0x120c: 0x4000, 0x120d: 0x4000, 0x120e: 0x4000, 0x120f: 0x4000, 0x1210: 0x4000, 0x1211: 0x4000,
+	0x1212: 0x4000, 0x1213: 0x4000, 0x1214: 0x4000, 0x1215: 0x4000, 0x1216: 0x4000, 0x1217: 0x4000,
+	0x1218: 0x4000, 0x1219: 0x4000, 0x121a: 0x4000, 0x121b: 0x4000, 0x121c: 0x4000, 0x121d: 0x4000,
+	0x121e: 0x4000, 0x121f: 0x4000, 0x1220: 0x4000, 0x1221: 0x4000, 0x1222: 0x4000, 0x1223: 0x4000,
+	0x1224: 0x4000, 0x1225: 0x4000, 0x1226: 0x4000, 0x1227: 0x4000, 0x1228: 0x4000, 0x1229: 0x4000,
+	0x122a: 0x4000, 0x122b: 0x4000, 0x122c: 0x4000,
+	// Block 0x49, offset 0x1240
+	0x1240: 0x4000, 0x1241: 0x4000, 0x1242: 0x4000, 0x1243: 0x4000, 0x1244: 0x4000, 0x1245: 0x4000,
+	0x1246: 0x4000, 0x1247: 0x4000, 0x1248: 0x4000, 0x1249: 0x4000, 0x124a: 0x4000, 0x124b: 0x4000,
+	0x124c: 0x4000, 0x124d: 0x4000, 0x124e: 0x4000, 0x124f: 0x4000, 0x1250: 0x4000, 0x1251: 0x4000,
+	0x1252: 0x4000, 0x1253: 0x4000, 0x1254: 0x4000, 0x1255: 0x4000, 0x1256: 0x4000, 0x1257: 0x4000,
+	0x1258: 0x4000, 0x1259: 0x4000, 0x125a: 0x4000, 0x125b: 0x4000, 0x125c: 0x4000, 0x125d: 0x4000,
+	0x125e: 0x4000, 0x125f: 0x4000, 0x1260: 0x4000, 0x1261: 0x4000, 0x1262: 0x4000, 0x1263: 0x4000,
+	0x1264: 0x4000, 0x1265: 0x4000, 0x1266: 0x4000, 0x1267: 0x4000, 0x1268: 0x4000, 0x1269: 0x4000,
+	0x126a: 0x4000, 0x126b: 0x4000, 0x126c: 0x4000, 0x126d: 0x4000, 0x126e: 0x4000, 0x126f: 0x4000,
+	0x1270: 0x4000, 0x1271: 0x4000, 0x1272: 0x4000,
+	// Block 0x4a, offset 0x1280
+	0x1280: 0x4000, 0x1281: 0x4000,
+	// Block 0x4b, offset 0x12c0
+	0x12c4: 0x4000,
+	// Block 0x4c, offset 0x1300
+	0x130f: 0x4000,
+	// Block 0x4d, offset 0x1340
+	0x1340: 0x2000, 0x1341: 0x2000, 0x1342: 0x2000, 0x1343: 0x2000, 0x1344: 0x2000, 0x1345: 0x2000,
+	0x1346: 0x2000, 0x1347: 0x2000, 0x1348: 0x2000, 0x1349: 0x2000, 0x134a: 0x2000,
+	0x1350: 0x2000, 0x1351: 0x2000,
+	0x1352: 0x2000, 0x1353: 0x2000, 0x1354: 0x2000, 0x1355: 0x2000, 0x1356: 0x2000, 0x1357: 0x2000,
+	0x1358: 0x2000, 0x1359: 0x2000, 0x135a: 0x2000, 0x135b: 0x2000, 0x135c: 0x2000, 0x135d: 0x2000,
+	0x135e: 0x2000, 0x135f: 0x2000, 0x1360: 0x2000, 0x1361: 0x2000, 0x1362: 0x2000, 0x1363: 0x2000,
+	0x1364: 0x2000, 0x1365: 0x2000, 0x1366: 0x2000, 0x1367: 0x2000, 0x1368: 0x2000, 0x1369: 0x2000,
+	0x136a: 0x2000, 0x136b: 0x2000, 0x136c: 0x2000, 0x136d: 0x2000,
+	0x1370: 0x2000, 0x1371: 0x2000, 0x1372: 0x2000, 0x1373: 0x2000, 0x1374: 0x2000, 0x1375: 0x2000,
+	0x1376: 0x2000, 0x1377: 0x2000, 0x1378: 0x2000, 0x1379: 0x2000, 0x137a: 0x2000, 0x137b: 0x2000,
+	0x137c: 0x2000, 0x137d: 0x2000, 0x137e: 0x2000, 0x137f: 0x2000,
+	// Block 0x4e, offset 0x1380
+	0x1380: 0x2000, 0x1381: 0x2000, 0x1382: 0x2000, 0x1383: 0x2000, 0x1384: 0x2000, 0x1385: 0x2000,
+	0x1386: 0x2000, 0x1387: 0x2000, 0x1388: 0x2000, 0x1389: 0x2000, 0x138a: 0x2000, 0x138b: 0x2000,
+	0x138c: 0x2000, 0x138d: 0x2000, 0x138e: 0x2000, 0x138f: 0x2000, 0x1390: 0x2000, 0x1391: 0x2000,
+	0x1392: 0x2000, 0x1393: 0x2000, 0x1394: 0x2000, 0x1395: 0x2000, 0x1396: 0x2000, 0x1397: 0x2000,
+	0x1398: 0x2000, 0x1399: 0x2000, 0x139a: 0x2000, 0x139b: 0x2000, 0x139c: 0x2000, 0x139d: 0x2000,
+	0x139e: 0x2000, 0x139f: 0x2000, 0x13a0: 0x2000, 0x13a1: 0x2000, 0x13a2: 0x2000, 0x13a3: 0x2000,
+	0x13a4: 0x2000, 0x13a5: 0x2000, 0x13a6: 0x2000, 0x13a7: 0x2000, 0x13a8: 0x2000, 0x13a9: 0x2000,
+	0x13b0: 0x2000, 0x13b1: 0x2000, 0x13b2: 0x2000, 0x13b3: 0x2000, 0x13b4: 0x2000, 0x13b5: 0x2000,
+	0x13b6: 0x2000, 0x13b7: 0x2000, 0x13b8: 0x2000, 0x13b9: 0x2000, 0x13ba: 0x2000, 0x13bb: 0x2000,
+	0x13bc: 0x2000, 0x13bd: 0x2000, 0x13be: 0x2000, 0x13bf: 0x2000,
+	// Block 0x4f, offset 0x13c0
+	0x13c0: 0x2000, 0x13c1: 0x2000, 0x13c2: 0x2000, 0x13c3: 0x2000, 0x13c4: 0x2000, 0x13c5: 0x2000,
+	0x13c6: 0x2000, 0x13c7: 0x2000, 0x13c8: 0x2000, 0x13c9: 0x2000, 0x13ca: 0x2000, 0x13cb: 0x2000,
+	0x13cc: 0x2000, 0x13cd: 0x2000, 0x13ce: 0x4000, 0x13cf: 0x2000, 0x13d0: 0x2000, 0x13d1: 0x4000,
+	0x13d2: 0x4000, 0x13d3: 0x4000, 0x13d4: 0x4000, 0x13d5: 0x4000, 0x13d6: 0x4000, 0x13d7: 0x4000,
+	0x13d8: 0x4000, 0x13d9: 0x4000, 0x13da: 0x4000, 0x13db: 0x2000, 0x13dc: 0x2000, 0x13dd: 0x2000,
+	0x13de: 0x2000, 0x13df: 0x2000, 0x13e0: 0x2000, 0x13e1: 0x2000, 0x13e2: 0x2000, 0x13e3: 0x2000,
+	0x13e4: 0x2000, 0x13e5: 0x2000, 0x13e6: 0x2000, 0x13e7: 0x2000, 0x13e8: 0x2000, 0x13e9: 0x2000,
+	0x13ea: 0x2000, 0x13eb: 0x2000, 0x13ec: 0x2000,
+	// Block 0x50, offset 0x1400
+	0x1400: 0x4000, 0x1401: 0x4000, 0x1402: 0x4000,
+	0x1410: 0x4000, 0x1411: 0x4000,
+	0x1412: 0x4000, 0x1413: 0x4000, 0x1414: 0x4000, 0x1415: 0x4000, 0x1416: 0x4000, 0x1417: 0x4000,
+	0x1418: 0x4000, 0x1419: 0x4000, 0x141a: 0x4000, 0x141b: 0x4000, 0x141c: 0x4000, 0x141d: 0x4000,
+	0x141e: 0x4000, 0x141f: 0x4000, 0x1420: 0x4000, 0x1421: 0x4000, 0x1422: 0x4000, 0x1423: 0x4000,
+	0x1424: 0x4000, 0x1425: 0x4000, 0x1426: 0x4000, 0x1427: 0x4000, 0x1428: 0x4000, 0x1429: 0x4000,
+	0x142a: 0x4000, 0x142b: 0x4000, 0x142c: 0x4000, 0x142d: 0x4000, 0x142e: 0x4000, 0x142f: 0x4000,
+	0x1430: 0x4000, 0x1431: 0x4000, 0x1432: 0x4000, 0x1433: 0x4000, 0x1434: 0x4000, 0x1435: 0x4000,
+	0x1436: 0x4000, 0x1437: 0x4000, 0x1438: 0x4000, 0x1439: 0x4000, 0x143a: 0x4000, 0x143b: 0x4000,
+	// Block 0x51, offset 0x1440
+	0x1440: 0x4000, 0x1441: 0x4000, 0x1442: 0x4000, 0x1443: 0x4000, 0x1444: 0x4000, 0x1445: 0x4000,
+	0x1446: 0x4000, 0x1447: 0x4000, 0x1448: 0x4000,
+	0x1450: 0x4000, 0x1451: 0x4000,
+	// Block 0x52, offset 0x1480
+	0x1480: 0x4000, 0x1481: 0x4000, 0x1482: 0x4000, 0x1483: 0x4000, 0x1484: 0x4000, 0x1485: 0x4000,
+	0x1486: 0x4000, 0x1487: 0x4000, 0x1488: 0x4000, 0x1489: 0x4000, 0x148a: 0x4000, 0x148b: 0x4000,
+	0x148c: 0x4000, 0x148d: 0x4000, 0x148e: 0x4000, 0x148f: 0x4000, 0x1490: 0x4000, 0x1491: 0x4000,
+	0x1492: 0x4000, 0x1493: 0x4000, 0x1494: 0x4000, 0x1495: 0x4000, 0x1496: 0x4000, 0x1497: 0x4000,
+	0x1498: 0x4000, 0x1499: 0x4000, 0x149a: 0x4000, 0x149b: 0x4000, 0x149c: 0x4000, 0x149d: 0x4000,
+	0x149e: 0x4000, 0x149f: 0x4000, 0x14a0: 0x4000,
+	0x14ad: 0x4000, 0x14ae: 0x4000, 0x14af: 0x4000,
+	0x14b0: 0x4000, 0x14b1: 0x4000, 0x14b2: 0x4000, 0x14b3: 0x4000, 0x14b4: 0x4000, 0x14b5: 0x4000,
+	0x14b7: 0x4000, 0x14b8: 0x4000, 0x14b9: 0x4000, 0x14ba: 0x4000, 0x14bb: 0x4000,
+	0x14bc: 0x4000, 0x14bd: 0x4000, 0x14be: 0x4000, 0x14bf: 0x4000,
+	// Block 0x53, offset 0x14c0
+	0x14c0: 0x4000, 0x14c1: 0x4000, 0x14c2: 0x4000, 0x14c3: 0x4000, 0x14c4: 0x4000, 0x14c5: 0x4000,
+	0x14c6: 0x4000, 0x14c7: 0x4000, 0x14c8: 0x4000, 0x14c9: 0x4000, 0x14ca: 0x4000, 0x14cb: 0x4000,
+	0x14cc: 0x4000, 0x14cd: 0x4000, 0x14ce: 0x4000, 0x14cf: 0x4000, 0x14d0: 0x4000, 0x14d1: 0x4000,
+	0x14d2: 0x4000, 0x14d3: 0x4000, 0x14d4: 0x4000, 0x14d5: 0x4000, 0x14d6: 0x4000, 0x14d7: 0x4000,
+	0x14d8: 0x4000, 0x14d9: 0x4000, 0x14da: 0x4000, 0x14db: 0x4000, 0x14dc: 0x4000, 0x14dd: 0x4000,
+	0x14de: 0x4000, 0x14df: 0x4000, 0x14e0: 0x4000, 0x14e1: 0x4000, 0x14e2: 0x4000, 0x14e3: 0x4000,
+	0x14e4: 0x4000, 0x14e5: 0x4000, 0x14e6: 0x4000, 0x14e7: 0x4000, 0x14e8: 0x4000, 0x14e9: 0x4000,
+	0x14ea: 0x4000, 0x14eb: 0x4000, 0x14ec: 0x4000, 0x14ed: 0x4000, 0x14ee: 0x4000, 0x14ef: 0x4000,
+	0x14f0: 0x4000, 0x14f1: 0x4000, 0x14f2: 0x4000, 0x14f3: 0x4000, 0x14f4: 0x4000, 0x14f5: 0x4000,
+	0x14f6: 0x4000, 0x14f7: 0x4000, 0x14f8: 0x4000, 0x14f9: 0x4000, 0x14fa: 0x4000, 0x14fb: 0x4000,
+	0x14fc: 0x4000, 0x14fe: 0x4000, 0x14ff: 0x4000,
+	// Block 0x54, offset 0x1500
+	0x1500: 0x4000, 0x1501: 0x4000, 0x1502: 0x4000, 0x1503: 0x4000, 0x1504: 0x4000, 0x1505: 0x4000,
+	0x1506: 0x4000, 0x1507: 0x4000, 0x1508: 0x4000, 0x1509: 0x4000, 0x150a: 0x4000, 0x150b: 0x4000,
+	0x150c: 0x4000, 0x150d: 0x4000, 0x150e: 0x4000, 0x150f: 0x4000, 0x1510: 0x4000, 0x1511: 0x4000,
+	0x1512: 0x4000, 0x1513: 0x4000,
+	0x1520: 0x4000, 0x1521: 0x4000, 0x1522: 0x4000, 0x1523: 0x4000,
+	0x1524: 0x4000, 0x1525: 0x4000, 0x1526: 0x4000, 0x1527: 0x4000, 0x1528: 0x4000, 0x1529: 0x4000,
+	0x152a: 0x4000, 0x152b: 0x4000, 0x152c: 0x4000, 0x152d: 0x4000, 0x152e: 0x4000, 0x152f: 0x4000,
+	0x1530: 0x4000, 0x1531: 0x4000, 0x1532: 0x4000, 0x1533: 0x4000, 0x1534: 0x4000, 0x1535: 0x4000,
+	0x1536: 0x4000, 0x1537: 0x4000, 0x1538: 0x4000, 0x1539: 0x4000, 0x153a: 0x4000, 0x153b: 0x4000,
+	0x153c: 0x4000, 0x153d: 0x4000, 0x153e: 0x4000, 0x153f: 0x4000,
+	// Block 0x55, offset 0x1540
+	0x1540: 0x4000, 0x1541: 0x4000, 0x1542: 0x4000, 0x1543: 0x4000, 0x1544: 0x4000, 0x1545: 0x4000,
+	0x1546: 0x4000, 0x1547: 0x4000, 0x1548: 0x4000, 0x1549: 0x4000, 0x154a: 0x4000,
+	0x154f: 0x4000, 0x1550: 0x4000, 0x1551: 0x4000,
+	0x1552: 0x4000, 0x1553: 0x4000,
+	0x1560: 0x4000, 0x1561: 0x4000, 0x1562: 0x4000, 0x1563: 0x4000,
+	0x1564: 0x4000, 0x1565: 0x4000, 0x1566: 0x4000, 0x1567: 0x4000, 0x1568: 0x4000, 0x1569: 0x4000,
+	0x156a: 0x4000, 0x156b: 0x4000, 0x156c: 0x4000, 0x156d: 0x4000, 0x156e: 0x4000, 0x156f: 0x4000,
+	0x1570: 0x4000, 0x1574: 0x4000,
+	0x1578: 0x4000, 0x1579: 0x4000, 0x157a: 0x4000, 0x157b: 0x4000,
+	0x157c: 0x4000, 0x157d: 0x4000, 0x157e: 0x4000, 0x157f: 0x4000,
+	// Block 0x56, offset 0x1580
+	0x1580: 0x4000, 0x1582: 0x4000, 0x1583: 0x4000, 0x1584: 0x4000, 0x1585: 0x4000,
+	0x1586: 0x4000, 0x1587: 0x4000, 0x1588: 0x4000, 0x1589: 0x4000, 0x158a: 0x4000, 0x158b: 0x4000,
+	0x158c: 0x4000, 0x158d: 0x4000, 0x158e: 0x4000, 0x158f: 0x4000, 0x1590: 0x4000, 0x1591: 0x4000,
+	0x1592: 0x4000, 0x1593: 0x4000, 0x1594: 0x4000, 0x1595: 0x4000, 0x1596: 0x4000, 0x1597: 0x4000,
+	0x1598: 0x4000, 0x1599: 0x4000, 0x159a: 0x4000, 0x159b: 0x4000, 0x159c: 0x4000, 0x159d: 0x4000,
+	0x159e: 0x4000, 0x159f: 0x4000, 0x15a0: 0x4000, 0x15a1: 0x4000, 0x15a2: 0x4000, 0x15a3: 0x4000,
+	0x15a4: 0x4000, 0x15a5: 0x4000, 0x15a6: 0x4000, 0x15a7: 0x4000, 0x15a8: 0x4000, 0x15a9: 0x4000,
+	0x15aa: 0x4000, 0x15ab: 0x4000, 0x15ac: 0x4000, 0x15ad: 0x4000, 0x15ae: 0x4000, 0x15af: 0x4000,
+	0x15b0: 0x4000, 0x15b1: 0x4000, 0x15b2: 0x4000, 0x15b3: 0x4000, 0x15b4: 0x4000, 0x15b5: 0x4000,
+	0x15b6: 0x4000, 0x15b7: 0x4000, 0x15b8: 0x4000, 0x15b9: 0x4000, 0x15ba: 0x4000, 0x15bb: 0x4000,
+	0x15bc: 0x4000, 0x15bd: 0x4000, 0x15be: 0x4000, 0x15bf: 0x4000,
+	// Block 0x57, offset 0x15c0
+	0x15c0: 0x4000, 0x15c1: 0x4000, 0x15c2: 0x4000, 0x15c3: 0x4000, 0x15c4: 0x4000, 0x15c5: 0x4000,
+	0x15c6: 0x4000, 0x15c7: 0x4000, 0x15c8: 0x4000, 0x15c9: 0x4000, 0x15ca: 0x4000, 0x15cb: 0x4000,
+	0x15cc: 0x4000, 0x15cd: 0x4000, 0x15ce: 0x4000, 0x15cf: 0x4000, 0x15d0: 0x4000, 0x15d1: 0x4000,
+	0x15d2: 0x4000, 0x15d3: 0x4000, 0x15d4: 0x4000, 0x15d5: 0x4000, 0x15d6: 0x4000, 0x15d7: 0x4000,
+	0x15d8: 0x4000, 0x15d9: 0x4000, 0x15da: 0x4000, 0x15db: 0x4000, 0x15dc: 0x4000, 0x15dd: 0x4000,
+	0x15de: 0x4000, 0x15df: 0x4000, 0x15e0: 0x4000, 0x15e1: 0x4000, 0x15e2: 0x4000, 0x15e3: 0x4000,
+	0x15e4: 0x4000, 0x15e5: 0x4000, 0x15e6: 0x4000, 0x15e7: 0x4000, 0x15e8: 0x4000, 0x15e9: 0x4000,
+	0x15ea: 0x4000, 0x15eb: 0x4000, 0x15ec: 0x4000, 0x15ed: 0x4000, 0x15ee: 0x4000, 0x15ef: 0x4000,
+	0x15f0: 0x4000, 0x15f1: 0x4000, 0x15f2: 0x4000, 0x15f3: 0x4000, 0x15f4: 0x4000, 0x15f5: 0x4000,
+	0x15f6: 0x4000, 0x15f7: 0x4000, 0x15f8: 0x4000, 0x15f9: 0x4000, 0x15fa: 0x4000, 0x15fb: 0x4000,
+	0x15fc: 0x4000, 0x15ff: 0x4000,
+	// Block 0x58, offset 0x1600
+	0x1600: 0x4000, 0x1601: 0x4000, 0x1602: 0x4000, 0x1603: 0x4000, 0x1604: 0x4000, 0x1605: 0x4000,
+	0x1606: 0x4000, 0x1607: 0x4000, 0x1608: 0x4000, 0x1609: 0x4000, 0x160a: 0x4000, 0x160b: 0x4000,
+	0x160c: 0x4000, 0x160d: 0x4000, 0x160e: 0x4000, 0x160f: 0x4000, 0x1610: 0x4000, 0x1611: 0x4000,
+	0x1612: 0x4000, 0x1613: 0x4000, 0x1614: 0x4000, 0x1615: 0x4000, 0x1616: 0x4000, 0x1617: 0x4000,
+	0x1618: 0x4000, 0x1619: 0x4000, 0x161a: 0x4000, 0x161b: 0x4000, 0x161c: 0x4000, 0x161d: 0x4000,
+	0x161e: 0x4000, 0x161f: 0x4000, 0x1620: 0x4000, 0x1621: 0x4000, 0x1622: 0x4000, 0x1623: 0x4000,
+	0x1624: 0x4000, 0x1625: 0x4000, 0x1626: 0x4000, 0x1627: 0x4000, 0x1628: 0x4000, 0x1629: 0x4000,
+	0x162a: 0x4000, 0x162b: 0x4000, 0x162c: 0x4000, 0x162d: 0x4000, 0x162e: 0x4000, 0x162f: 0x4000,
+	0x1630: 0x4000, 0x1631: 0x4000, 0x1632: 0x4000, 0x1633: 0x4000, 0x1634: 0x4000, 0x1635: 0x4000,
+	0x1636: 0x4000, 0x1637: 0x4000, 0x1638: 0x4000, 0x1639: 0x4000, 0x163a: 0x4000, 0x163b: 0x4000,
+	0x163c: 0x4000, 0x163d: 0x4000,
+	// Block 0x59, offset 0x1640
+	0x164b: 0x4000,
+	0x164c: 0x4000, 0x164d: 0x4000, 0x164e: 0x4000, 0x1650: 0x4000, 0x1651: 0x4000,
+	0x1652: 0x4000, 0x1653: 0x4000, 0x1654: 0x4000, 0x1655: 0x4000, 0x1656: 0x4000, 0x1657: 0x4000,
+	0x1658: 0x4000, 0x1659: 0x4000, 0x165a: 0x4000, 0x165b: 0x4000, 0x165c: 0x4000, 0x165d: 0x4000,
+	0x165e: 0x4000, 0x165f: 0x4000, 0x1660: 0x4000, 0x1661: 0x4000, 0x1662: 0x4000, 0x1663: 0x4000,
+	0x1664: 0x4000, 0x1665: 0x4000, 0x1666: 0x4000, 0x1667: 0x4000,
+	0x167a: 0x4000,
+	// Block 0x5a, offset 0x1680
+	0x1695: 0x4000, 0x1696: 0x4000,
+	0x16a4: 0x4000,
+	// Block 0x5b, offset 0x16c0
+	0x16fb: 0x4000,
+	0x16fc: 0x4000, 0x16fd: 0x4000, 0x16fe: 0x4000, 0x16ff: 0x4000,
+	// Block 0x5c, offset 0x1700
+	0x1700: 0x4000, 0x1701: 0x4000, 0x1702: 0x4000, 0x1703: 0x4000, 0x1704: 0x4000, 0x1705: 0x4000,
+	0x1706: 0x4000, 0x1707: 0x4000, 0x1708: 0x4000, 0x1709: 0x4000, 0x170a: 0x4000, 0x170b: 0x4000,
+	0x170c: 0x4000, 0x170d: 0x4000, 0x170e: 0x4000, 0x170f: 0x4000,
+	// Block 0x5d, offset 0x1740
+	0x1740: 0x4000, 0x1741: 0x4000, 0x1742: 0x4000, 0x1743: 0x4000, 0x1744: 0x4000, 0x1745: 0x4000,
+	0x174c: 0x4000, 0x1750: 0x4000, 0x1751: 0x4000,
+	0x1752: 0x4000,
+	0x176b: 0x4000, 0x176c: 0x4000,
+	0x1774: 0x4000, 0x1775: 0x4000,
+	0x1776: 0x4000,
+	// Block 0x5e, offset 0x1780
+	0x1790: 0x4000, 0x1791: 0x4000,
+	0x1792: 0x4000, 0x1793: 0x4000, 0x1794: 0x4000, 0x1795: 0x4000, 0x1796: 0x4000, 0x1797: 0x4000,
+	0x1798: 0x4000, 0x1799: 0x4000, 0x179a: 0x4000, 0x179b: 0x4000, 0x179c: 0x4000, 0x179d: 0x4000,
+	0x179e: 0x4000, 0x17a0: 0x4000, 0x17a1: 0x4000, 0x17a2: 0x4000, 0x17a3: 0x4000,
+	0x17a4: 0x4000, 0x17a5: 0x4000, 0x17a6: 0x4000, 0x17a7: 0x4000,
+	0x17b0: 0x4000, 0x17b3: 0x4000, 0x17b4: 0x4000, 0x17b5: 0x4000,
+	0x17b6: 0x4000, 0x17b7: 0x4000, 0x17b8: 0x4000, 0x17b9: 0x4000, 0x17ba: 0x4000, 0x17bb: 0x4000,
+	0x17bc: 0x4000, 0x17bd: 0x4000, 0x17be: 0x4000,
+	// Block 0x5f, offset 0x17c0
+	0x17c0: 0x4000, 0x17c1: 0x4000, 0x17c2: 0x4000, 0x17c3: 0x4000, 0x17c4: 0x4000, 0x17c5: 0x4000,
+	0x17c6: 0x4000, 0x17c7: 0x4000, 0x17c8: 0x4000, 0x17c9: 0x4000, 0x17ca: 0x4000, 0x17cb: 0x4000,
+	0x17d0: 0x4000, 0x17d1: 0x4000,
+	0x17d2: 0x4000, 0x17d3: 0x4000, 0x17d4: 0x4000, 0x17d5: 0x4000, 0x17d6: 0x4000, 0x17d7: 0x4000,
+	0x17d8: 0x4000, 0x17d9: 0x4000, 0x17da: 0x4000, 0x17db: 0x4000, 0x17dc: 0x4000, 0x17dd: 0x4000,
+	0x17de: 0x4000,
+	// Block 0x60, offset 0x1800
+	0x1800: 0x4000, 0x1801: 0x4000, 0x1802: 0x4000, 0x1803: 0x4000, 0x1804: 0x4000, 0x1805: 0x4000,
+	0x1806: 0x4000, 0x1807: 0x4000, 0x1808: 0x4000, 0x1809: 0x4000, 0x180a: 0x4000, 0x180b: 0x4000,
+	0x180c: 0x4000, 0x180d: 0x4000, 0x180e: 0x4000, 0x180f: 0x4000, 0x1810: 0x4000, 0x1811: 0x4000,
+	// Block 0x61, offset 0x1840
+	0x1840: 0x4000,
+	// Block 0x62, offset 0x1880
+	0x1880: 0x2000, 0x1881: 0x2000, 0x1882: 0x2000, 0x1883: 0x2000, 0x1884: 0x2000, 0x1885: 0x2000,
+	0x1886: 0x2000, 0x1887: 0x2000, 0x1888: 0x2000, 0x1889: 0x2000, 0x188a: 0x2000, 0x188b: 0x2000,
+	0x188c: 0x2000, 0x188d: 0x2000, 0x188e: 0x2000, 0x188f: 0x2000, 0x1890: 0x2000, 0x1891: 0x2000,
+	0x1892: 0x2000, 0x1893: 0x2000, 0x1894: 0x2000, 0x1895: 0x2000, 0x1896: 0x2000, 0x1897: 0x2000,
+	0x1898: 0x2000, 0x1899: 0x2000, 0x189a: 0x2000, 0x189b: 0x2000, 0x189c: 0x2000, 0x189d: 0x2000,
+	0x189e: 0x2000, 0x189f: 0x2000, 0x18a0: 0x2000, 0x18a1: 0x2000, 0x18a2: 0x2000, 0x18a3: 0x2000,
+	0x18a4: 0x2000, 0x18a5: 0x2000, 0x18a6: 0x2000, 0x18a7: 0x2000, 0x18a8: 0x2000, 0x18a9: 0x2000,
+	0x18aa: 0x2000, 0x18ab: 0x2000, 0x18ac: 0x2000, 0x18ad: 0x2000, 0x18ae: 0x2000, 0x18af: 0x2000,
+	0x18b0: 0x2000, 0x18b1: 0x2000, 0x18b2: 0x2000, 0x18b3: 0x2000, 0x18b4: 0x2000, 0x18b5: 0x2000,
+	0x18b6: 0x2000, 0x18b7: 0x2000, 0x18b8: 0x2000, 0x18b9: 0x2000, 0x18ba: 0x2000, 0x18bb: 0x2000,
+	0x18bc: 0x2000, 0x18bd: 0x2000,
+}
+
+// widthIndex: 22 blocks, 1408 entries, 1408 bytes
+// Block 0 is the zero block.
+var widthIndex = [1408]uint8{
+	// Block 0x0, offset 0x0
+	// Block 0x1, offset 0x40
+	// Block 0x2, offset 0x80
+	// Block 0x3, offset 0xc0
+	0xc2: 0x01, 0xc3: 0x02, 0xc4: 0x03, 0xc5: 0x04, 0xc7: 0x05,
+	0xc9: 0x06, 0xcb: 0x07, 0xcc: 0x08, 0xcd: 0x09, 0xce: 0x0a, 0xcf: 0x0b,
+	0xd0: 0x0c, 0xd1: 0x0d,
+	0xe1: 0x02, 0xe2: 0x03, 0xe3: 0x04, 0xe4: 0x05, 0xe5: 0x06, 0xe6: 0x06, 0xe7: 0x06,
+	0xe8: 0x06, 0xe9: 0x06, 0xea: 0x07, 0xeb: 0x06, 0xec: 0x06, 0xed: 0x08, 0xee: 0x09, 0xef: 0x0a,
+	0xf0: 0x0f, 0xf3: 0x12, 0xf4: 0x13,
+	// Block 0x4, offset 0x100
+	0x104: 0x0e, 0x105: 0x0f,
+	// Block 0x5, offset 0x140
+	0x140: 0x10, 0x141: 0x11, 0x142: 0x12, 0x144: 0x13, 0x145: 0x14, 0x146: 0x15, 0x147: 0x16,
+	0x148: 0x17, 0x149: 0x18, 0x14a: 0x19, 0x14c: 0x1a, 0x14f: 0x1b,
+	0x151: 0x1c, 0x152: 0x08, 0x153: 0x1d, 0x154: 0x1e, 0x155: 0x1f, 0x156: 0x20, 0x157: 0x21,
+	0x158: 0x22, 0x159: 0x23, 0x15a: 0x24, 0x15b: 0x25, 0x15c: 0x26, 0x15d: 0x27, 0x15e: 0x28, 0x15f: 0x29,
+	0x166: 0x2a,
+	0x16c: 0x2b, 0x16d: 0x2c,
+	0x17a: 0x2d, 0x17b: 0x2e, 0x17c: 0x0e, 0x17d: 0x0e, 0x17e: 0x0e, 0x17f: 0x2f,
+	// Block 0x6, offset 0x180
+	0x180: 0x30, 0x181: 0x31, 0x182: 0x32, 0x183: 0x33, 0x184: 0x34, 0x185: 0x35, 0x186: 0x36, 0x187: 0x37,
+	0x188: 0x38, 0x189: 0x39, 0x18a: 0x0e, 0x18b: 0x3a, 0x18c: 0x0e, 0x18d: 0x0e, 0x18e: 0x0e, 0x18f: 0x0e,
+	0x190: 0x0e, 0x191: 0x0e, 0x192: 0x0e, 0x193: 0x0e, 0x194: 0x0e, 0x195: 0x0e, 0x196: 0x0e, 0x197: 0x0e,
+	0x198: 0x0e, 0x199: 0x0e, 0x19a: 0x0e, 0x19b: 0x0e, 0x19c: 0x0e, 0x19d: 0x0e, 0x19e: 0x0e, 0x19f: 0x0e,
+	0x1a0: 0x0e, 0x1a1: 0x0e, 0x1a2: 0x0e, 0x1a3: 0x0e, 0x1a4: 0x0e, 0x1a5: 0x0e, 0x1a6: 0x0e, 0x1a7: 0x0e,
+	0x1a8: 0x0e, 0x1a9: 0x0e, 0x1aa: 0x0e, 0x1ab: 0x0e, 0x1ac: 0x0e, 0x1ad: 0x0e, 0x1ae: 0x0e, 0x1af: 0x0e,
+	0x1b0: 0x0e, 0x1b1: 0x0e, 0x1b2: 0x0e, 0x1b3: 0x0e, 0x1b4: 0x0e, 0x1b5: 0x0e, 0x1b6: 0x0e, 0x1b7: 0x0e,
+	0x1b8: 0x0e, 0x1b9: 0x0e, 0x1ba: 0x0e, 0x1bb: 0x0e, 0x1bc: 0x0e, 0x1bd: 0x0e, 0x1be: 0x0e, 0x1bf: 0x0e,
+	// Block 0x7, offset 0x1c0
+	0x1c0: 0x0e, 0x1c1: 0x0e, 0x1c2: 0x0e, 0x1c3: 0x0e, 0x1c4: 0x0e, 0x1c5: 0x0e, 0x1c6: 0x0e, 0x1c7: 0x0e,
+	0x1c8: 0x0e, 0x1c9: 0x0e, 0x1ca: 0x0e, 0x1cb: 0x0e, 0x1cc: 0x0e, 0x1cd: 0x0e, 0x1ce: 0x0e, 0x1cf: 0x0e,
+	0x1d0: 0x0e, 0x1d1: 0x0e, 0x1d2: 0x0e, 0x1d3: 0x0e, 0x1d4: 0x0e, 0x1d5: 0x0e, 0x1d6: 0x0e, 0x1d7: 0x0e,
+	0x1d8: 0x0e, 0x1d9: 0x0e, 0x1da: 0x0e, 0x1db: 0x0e, 0x1dc: 0x0e, 0x1dd: 0x0e, 0x1de: 0x0e, 0x1df: 0x0e,
+	0x1e0: 0x0e, 0x1e1: 0x0e, 0x1e2: 0x0e, 0x1e3: 0x0e, 0x1e4: 0x0e, 0x1e5: 0x0e, 0x1e6: 0x0e, 0x1e7: 0x0e,
+	0x1e8: 0x0e, 0x1e9: 0x0e, 0x1ea: 0x0e, 0x1eb: 0x0e, 0x1ec: 0x0e, 0x1ed: 0x0e, 0x1ee: 0x0e, 0x1ef: 0x0e,
+	0x1f0: 0x0e, 0x1f1: 0x0e, 0x1f2: 0x0e, 0x1f3: 0x0e, 0x1f4: 0x0e, 0x1f5: 0x0e, 0x1f6: 0x0e,
+	0x1f8: 0x0e, 0x1f9: 0x0e, 0x1fa: 0x0e, 0x1fb: 0x0e, 0x1fc: 0x0e, 0x1fd: 0x0e, 0x1fe: 0x0e, 0x1ff: 0x0e,
+	// Block 0x8, offset 0x200
+	0x200: 0x0e, 0x201: 0x0e, 0x202: 0x0e, 0x203: 0x0e, 0x204: 0x0e, 0x205: 0x0e, 0x206: 0x0e, 0x207: 0x0e,
+	0x208: 0x0e, 0x209: 0x0e, 0x20a: 0x0e, 0x20b: 0x0e, 0x20c: 0x0e, 0x20d: 0x0e, 0x20e: 0x0e, 0x20f: 0x0e,
+	0x210: 0x0e, 0x211: 0x0e, 0x212: 0x0e, 0x213: 0x0e, 0x214: 0x0e, 0x215: 0x0e, 0x216: 0x0e, 0x217: 0x0e,
+	0x218: 0x0e, 0x219: 0x0e, 0x21a: 0x0e, 0x21b: 0x0e, 0x21c: 0x0e, 0x21d: 0x0e, 0x21e: 0x0e, 0x21f: 0x0e,
+	0x220: 0x0e, 0x221: 0x0e, 0x222: 0x0e, 0x223: 0x0e, 0x224: 0x0e, 0x225: 0x0e, 0x226: 0x0e, 0x227: 0x0e,
+	0x228: 0x0e, 0x229: 0x0e, 0x22a: 0x0e, 0x22b: 0x0e, 0x22c: 0x0e, 0x22d: 0x0e, 0x22e: 0x0e, 0x22f: 0x0e,
+	0x230: 0x0e, 0x231: 0x0e, 0x232: 0x0e, 0x233: 0x0e, 0x234: 0x0e, 0x235: 0x0e, 0x236: 0x0e, 0x237: 0x0e,
+	0x238: 0x0e, 0x239: 0x0e, 0x23a: 0x0e, 0x23b: 0x0e, 0x23c: 0x0e, 0x23d: 0x0e, 0x23e: 0x0e, 0x23f: 0x0e,
+	// Block 0x9, offset 0x240
+	0x240: 0x0e, 0x241: 0x0e, 0x242: 0x0e, 0x243: 0x0e, 0x244: 0x0e, 0x245: 0x0e, 0x246: 0x0e, 0x247: 0x0e,
+	0x248: 0x0e, 0x249: 0x0e, 0x24a: 0x0e, 0x24b: 0x0e, 0x24c: 0x0e, 0x24d: 0x0e, 0x24e: 0x0e, 0x24f: 0x0e,
+	0x250: 0x0e, 0x251: 0x0e, 0x252: 0x3b, 0x253: 0x3c,
+	0x265: 0x3d,
+	0x270: 0x0e, 0x271: 0x0e, 0x272: 0x0e, 0x273: 0x0e, 0x274: 0x0e, 0x275: 0x0e, 0x276: 0x0e, 0x277: 0x0e,
+	0x278: 0x0e, 0x279: 0x0e, 0x27a: 0x0e, 0x27b: 0x0e, 0x27c: 0x0e, 0x27d: 0x0e, 0x27e: 0x0e, 0x27f: 0x0e,
+	// Block 0xa, offset 0x280
+	0x280: 0x0e, 0x281: 0x0e, 0x282: 0x0e, 0x283: 0x0e, 0x284: 0x0e, 0x285: 0x0e, 0x286: 0x0e, 0x287: 0x0e,
+	0x288: 0x0e, 0x289: 0x0e, 0x28a: 0x0e, 0x28b: 0x0e, 0x28c: 0x0e, 0x28d: 0x0e, 0x28e: 0x0e, 0x28f: 0x0e,
+	0x290: 0x0e, 0x291: 0x0e, 0x292: 0x0e, 0x293: 0x0e, 0x294: 0x0e, 0x295: 0x0e, 0x296: 0x0e, 0x297: 0x0e,
+	0x298: 0x0e, 0x299: 0x0e, 0x29a: 0x0e, 0x29b: 0x0e, 0x29c: 0x0e, 0x29d: 0x0e, 0x29e: 0x3e,
+	// Block 0xb, offset 0x2c0
+	0x2c0: 0x08, 0x2c1: 0x08, 0x2c2: 0x08, 0x2c3: 0x08, 0x2c4: 0x08, 0x2c5: 0x08, 0x2c6: 0x08, 0x2c7: 0x08,
+	0x2c8: 0x08, 0x2c9: 0x08, 0x2ca: 0x08, 0x2cb: 0x08, 0x2cc: 0x08, 0x2cd: 0x08, 0x2ce: 0x08, 0x2cf: 0x08,
+	0x2d0: 0x08, 0x2d1: 0x08, 0x2d2: 0x08, 0x2d3: 0x08, 0x2d4: 0x08, 0x2d5: 0x08, 0x2d6: 0x08, 0x2d7: 0x08,
+	0x2d8: 0x08, 0x2d9: 0x08, 0x2da: 0x08, 0x2db: 0x08, 0x2dc: 0x08, 0x2dd: 0x08, 0x2de: 0x08, 0x2df: 0x08,
+	0x2e0: 0x08, 0x2e1: 0x08, 0x2e2: 0x08, 0x2e3: 0x08, 0x2e4: 0x08, 0x2e5: 0x08, 0x2e6: 0x08, 0x2e7: 0x08,
+	0x2e8: 0x08, 0x2e9: 0x08, 0x2ea: 0x08, 0x2eb: 0x08, 0x2ec: 0x08, 0x2ed: 0x08, 0x2ee: 0x08, 0x2ef: 0x08,
+	0x2f0: 0x08, 0x2f1: 0x08, 0x2f2: 0x08, 0x2f3: 0x08, 0x2f4: 0x08, 0x2f5: 0x08, 0x2f6: 0x08, 0x2f7: 0x08,
+	0x2f8: 0x08, 0x2f9: 0x08, 0x2fa: 0x08, 0x2fb: 0x08, 0x2fc: 0x08, 0x2fd: 0x08, 0x2fe: 0x08, 0x2ff: 0x08,
+	// Block 0xc, offset 0x300
+	0x300: 0x08, 0x301: 0x08, 0x302: 0x08, 0x303: 0x08, 0x304: 0x08, 0x305: 0x08, 0x306: 0x08, 0x307: 0x08,
+	0x308: 0x08, 0x309: 0x08, 0x30a: 0x08, 0x30b: 0x08, 0x30c: 0x08, 0x30d: 0x08, 0x30e: 0x08, 0x30f: 0x08,
+	0x310: 0x08, 0x311: 0x08, 0x312: 0x08, 0x313: 0x08, 0x314: 0x08, 0x315: 0x08, 0x316: 0x08, 0x317: 0x08,
+	0x318: 0x08, 0x319: 0x08, 0x31a: 0x08, 0x31b: 0x08, 0x31c: 0x08, 0x31d: 0x08, 0x31e: 0x08, 0x31f: 0x08,
+	0x320: 0x08, 0x321: 0x08, 0x322: 0x08, 0x323: 0x08, 0x324: 0x0e, 0x325: 0x0e, 0x326: 0x0e, 0x327: 0x0e,
+	0x328: 0x0e, 0x329: 0x0e, 0x32a: 0x0e, 0x32b: 0x0e,
+	0x338: 0x3f, 0x339: 0x40, 0x33c: 0x41, 0x33d: 0x42, 0x33e: 0x43, 0x33f: 0x44,
+	// Block 0xd, offset 0x340
+	0x37f: 0x45,
+	// Block 0xe, offset 0x380
+	0x380: 0x0e, 0x381: 0x0e, 0x382: 0x0e, 0x383: 0x0e, 0x384: 0x0e, 0x385: 0x0e, 0x386: 0x0e, 0x387: 0x0e,
+	0x388: 0x0e, 0x389: 0x0e, 0x38a: 0x0e, 0x38b: 0x0e, 0x38c: 0x0e, 0x38d: 0x0e, 0x38e: 0x0e, 0x38f: 0x0e,
+	0x390: 0x0e, 0x391: 0x0e, 0x392: 0x0e, 0x393: 0x0e, 0x394: 0x0e, 0x395: 0x0e, 0x396: 0x0e, 0x397: 0x0e,
+	0x398: 0x0e, 0x399: 0x0e, 0x39a: 0x0e, 0x39b: 0x0e, 0x39c: 0x0e, 0x39d: 0x0e, 0x39e: 0x0e, 0x39f: 0x46,
+	0x3a0: 0x0e, 0x3a1: 0x0e, 0x3a2: 0x0e, 0x3a3: 0x0e, 0x3a4: 0x0e, 0x3a5: 0x0e, 0x3a6: 0x0e, 0x3a7: 0x0e,
+	0x3a8: 0x0e, 0x3a9: 0x0e, 0x3aa: 0x0e, 0x3ab: 0x47,
+	// Block 0xf, offset 0x3c0
+	0x3c0: 0x48,
+	// Block 0x10, offset 0x400
+	0x400: 0x49, 0x403: 0x4a, 0x404: 0x4b, 0x405: 0x4c, 0x406: 0x4d,
+	0x408: 0x4e, 0x409: 0x4f, 0x40c: 0x50, 0x40d: 0x51, 0x40e: 0x52, 0x40f: 0x53,
+	0x410: 0x3a, 0x411: 0x54, 0x412: 0x0e, 0x413: 0x55, 0x414: 0x56, 0x415: 0x57, 0x416: 0x58, 0x417: 0x59,
+	0x418: 0x0e, 0x419: 0x5a, 0x41a: 0x0e, 0x41b: 0x5b,
+	0x424: 0x5c, 0x425: 0x5d, 0x426: 0x5e, 0x427: 0x5f,
+	// Block 0x11, offset 0x440
+	0x456: 0x0b, 0x457: 0x06,
+	0x458: 0x0c, 0x45b: 0x0d, 0x45f: 0x0e,
+	0x460: 0x06, 0x461: 0x06, 0x462: 0x06, 0x463: 0x06, 0x464: 0x06, 0x465: 0x06, 0x466: 0x06, 0x467: 0x06,
+	0x468: 0x06, 0x469: 0x06, 0x46a: 0x06, 0x46b: 0x06, 0x46c: 0x06, 0x46d: 0x06, 0x46e: 0x06, 0x46f: 0x06,
+	0x470: 0x06, 0x471: 0x06, 0x472: 0x06, 0x473: 0x06, 0x474: 0x06, 0x475: 0x06, 0x476: 0x06, 0x477: 0x06,
+	0x478: 0x06, 0x479: 0x06, 0x47a: 0x06, 0x47b: 0x06, 0x47c: 0x06, 0x47d: 0x06, 0x47e: 0x06, 0x47f: 0x06,
+	// Block 0x12, offset 0x480
+	0x484: 0x08, 0x485: 0x08, 0x486: 0x08, 0x487: 0x09,
+	// Block 0x13, offset 0x4c0
+	0x4c0: 0x08, 0x4c1: 0x08, 0x4c2: 0x08, 0x4c3: 0x08, 0x4c4: 0x08, 0x4c5: 0x08, 0x4c6: 0x08, 0x4c7: 0x08,
+	0x4c8: 0x08, 0x4c9: 0x08, 0x4ca: 0x08, 0x4cb: 0x08, 0x4cc: 0x08, 0x4cd: 0x08, 0x4ce: 0x08, 0x4cf: 0x08,
+	0x4d0: 0x08, 0x4d1: 0x08, 0x4d2: 0x08, 0x4d3: 0x08, 0x4d4: 0x08, 0x4d5: 0x08, 0x4d6: 0x08, 0x4d7: 0x08,
+	0x4d8: 0x08, 0x4d9: 0x08, 0x4da: 0x08, 0x4db: 0x08, 0x4dc: 0x08, 0x4dd: 0x08, 0x4de: 0x08, 0x4df: 0x08,
+	0x4e0: 0x08, 0x4e1: 0x08, 0x4e2: 0x08, 0x4e3: 0x08, 0x4e4: 0x08, 0x4e5: 0x08, 0x4e6: 0x08, 0x4e7: 0x08,
+	0x4e8: 0x08, 0x4e9: 0x08, 0x4ea: 0x08, 0x4eb: 0x08, 0x4ec: 0x08, 0x4ed: 0x08, 0x4ee: 0x08, 0x4ef: 0x08,
+	0x4f0: 0x08, 0x4f1: 0x08, 0x4f2: 0x08, 0x4f3: 0x08, 0x4f4: 0x08, 0x4f5: 0x08, 0x4f6: 0x08, 0x4f7: 0x08,
+	0x4f8: 0x08, 0x4f9: 0x08, 0x4fa: 0x08, 0x4fb: 0x08, 0x4fc: 0x08, 0x4fd: 0x08, 0x4fe: 0x08, 0x4ff: 0x60,
+	// Block 0x14, offset 0x500
+	0x520: 0x10,
+	0x530: 0x09, 0x531: 0x09, 0x532: 0x09, 0x533: 0x09, 0x534: 0x09, 0x535: 0x09, 0x536: 0x09, 0x537: 0x09,
+	0x538: 0x09, 0x539: 0x09, 0x53a: 0x09, 0x53b: 0x09, 0x53c: 0x09, 0x53d: 0x09, 0x53e: 0x09, 0x53f: 0x11,
+	// Block 0x15, offset 0x540
+	0x540: 0x09, 0x541: 0x09, 0x542: 0x09, 0x543: 0x09, 0x544: 0x09, 0x545: 0x09, 0x546: 0x09, 0x547: 0x09,
+	0x548: 0x09, 0x549: 0x09, 0x54a: 0x09, 0x54b: 0x09, 0x54c: 0x09, 0x54d: 0x09, 0x54e: 0x09, 0x54f: 0x11,
+}
+
+// inverseData contains 4-byte entries of the following format:
+//   <length> <modified UTF-8-encoded rune> <0 padding>
+// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the
+// UTF-8 encoding of the original rune. Mappings often have the following
+// pattern:
+//   A -> A  (U+FF21 -> U+0041)
+//   B -> B  (U+FF22 -> U+0042)
+//   ...
+// By xor-ing the last byte the same entry can be shared by many mappings. This
+// reduces the total number of distinct entries by about two thirds.
+// The resulting entry for the aforementioned mappings is
+//   { 0x01, 0xE0, 0x00, 0x00 }
+// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get
+//   E0 ^ A1 = 41.
+// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get
+//   E0 ^ A2 = 42.
+// Note that because of the xor-ing, the byte sequence stored in the entry is
+// not valid UTF-8.
+var inverseData = [150][4]byte{
+	{0x00, 0x00, 0x00, 0x00},
+	{0x03, 0xe3, 0x80, 0xa0},
+	{0x03, 0xef, 0xbc, 0xa0},
+	{0x03, 0xef, 0xbc, 0xe0},
+	{0x03, 0xef, 0xbd, 0xe0},
+	{0x03, 0xef, 0xbf, 0x02},
+	{0x03, 0xef, 0xbf, 0x00},
+	{0x03, 0xef, 0xbf, 0x0e},
+	{0x03, 0xef, 0xbf, 0x0c},
+	{0x03, 0xef, 0xbf, 0x0f},
+	{0x03, 0xef, 0xbf, 0x39},
+	{0x03, 0xef, 0xbf, 0x3b},
+	{0x03, 0xef, 0xbf, 0x3f},
+	{0x03, 0xef, 0xbf, 0x2a},
+	{0x03, 0xef, 0xbf, 0x0d},
+	{0x03, 0xef, 0xbf, 0x25},
+	{0x03, 0xef, 0xbd, 0x1a},
+	{0x03, 0xef, 0xbd, 0x26},
+	{0x01, 0xa0, 0x00, 0x00},
+	{0x03, 0xef, 0xbd, 0x25},
+	{0x03, 0xef, 0xbd, 0x23},
+	{0x03, 0xef, 0xbd, 0x2e},
+	{0x03, 0xef, 0xbe, 0x07},
+	{0x03, 0xef, 0xbe, 0x05},
+	{0x03, 0xef, 0xbd, 0x06},
+	{0x03, 0xef, 0xbd, 0x13},
+	{0x03, 0xef, 0xbd, 0x0b},
+	{0x03, 0xef, 0xbd, 0x16},
+	{0x03, 0xef, 0xbd, 0x0c},
+	{0x03, 0xef, 0xbd, 0x15},
+	{0x03, 0xef, 0xbd, 0x0d},
+	{0x03, 0xef, 0xbd, 0x1c},
+	{0x03, 0xef, 0xbd, 0x02},
+	{0x03, 0xef, 0xbd, 0x1f},
+	{0x03, 0xef, 0xbd, 0x1d},
+	{0x03, 0xef, 0xbd, 0x17},
+	{0x03, 0xef, 0xbd, 0x08},
+	{0x03, 0xef, 0xbd, 0x09},
+	{0x03, 0xef, 0xbd, 0x0e},
+	{0x03, 0xef, 0xbd, 0x04},
+	{0x03, 0xef, 0xbd, 0x05},
+	{0x03, 0xef, 0xbe, 0x3f},
+	{0x03, 0xef, 0xbe, 0x00},
+	{0x03, 0xef, 0xbd, 0x2c},
+	{0x03, 0xef, 0xbe, 0x06},
+	{0x03, 0xef, 0xbe, 0x0c},
+	{0x03, 0xef, 0xbe, 0x0f},
+	{0x03, 0xef, 0xbe, 0x0d},
+	{0x03, 0xef, 0xbe, 0x0b},
+	{0x03, 0xef, 0xbe, 0x19},
+	{0x03, 0xef, 0xbe, 0x15},
+	{0x03, 0xef, 0xbe, 0x11},
+	{0x03, 0xef, 0xbe, 0x31},
+	{0x03, 0xef, 0xbe, 0x33},
+	{0x03, 0xef, 0xbd, 0x0f},
+	{0x03, 0xef, 0xbe, 0x30},
+	{0x03, 0xef, 0xbe, 0x3e},
+	{0x03, 0xef, 0xbe, 0x32},
+	{0x03, 0xef, 0xbe, 0x36},
+	{0x03, 0xef, 0xbd, 0x14},
+	{0x03, 0xef, 0xbe, 0x2e},
+	{0x03, 0xef, 0xbd, 0x1e},
+	{0x03, 0xef, 0xbe, 0x10},
+	{0x03, 0xef, 0xbf, 0x13},
+	{0x03, 0xef, 0xbf, 0x15},
+	{0x03, 0xef, 0xbf, 0x17},
+	{0x03, 0xef, 0xbf, 0x1f},
+	{0x03, 0xef, 0xbf, 0x1d},
+	{0x03, 0xef, 0xbf, 0x1b},
+	{0x03, 0xef, 0xbf, 0x09},
+	{0x03, 0xef, 0xbf, 0x0b},
+	{0x03, 0xef, 0xbf, 0x37},
+	{0x03, 0xef, 0xbe, 0x04},
+	{0x01, 0xe0, 0x00, 0x00},
+	{0x03, 0xe2, 0xa6, 0x1a},
+	{0x03, 0xe2, 0xa6, 0x26},
+	{0x03, 0xe3, 0x80, 0x23},
+	{0x03, 0xe3, 0x80, 0x2e},
+	{0x03, 0xe3, 0x80, 0x25},
+	{0x03, 0xe3, 0x83, 0x1e},
+	{0x03, 0xe3, 0x83, 0x14},
+	{0x03, 0xe3, 0x82, 0x06},
+	{0x03, 0xe3, 0x82, 0x0b},
+	{0x03, 0xe3, 0x82, 0x0c},
+	{0x03, 0xe3, 0x82, 0x0d},
+	{0x03, 0xe3, 0x82, 0x02},
+	{0x03, 0xe3, 0x83, 0x0f},
+	{0x03, 0xe3, 0x83, 0x08},
+	{0x03, 0xe3, 0x83, 0x09},
+	{0x03, 0xe3, 0x83, 0x2c},
+	{0x03, 0xe3, 0x83, 0x0c},
+	{0x03, 0xe3, 0x82, 0x13},
+	{0x03, 0xe3, 0x82, 0x16},
+	{0x03, 0xe3, 0x82, 0x15},
+	{0x03, 0xe3, 0x82, 0x1c},
+	{0x03, 0xe3, 0x82, 0x1f},
+	{0x03, 0xe3, 0x82, 0x1d},
+	{0x03, 0xe3, 0x82, 0x1a},
+	{0x03, 0xe3, 0x82, 0x17},
+	{0x03, 0xe3, 0x82, 0x08},
+	{0x03, 0xe3, 0x82, 0x09},
+	{0x03, 0xe3, 0x82, 0x0e},
+	{0x03, 0xe3, 0x82, 0x04},
+	{0x03, 0xe3, 0x82, 0x05},
+	{0x03, 0xe3, 0x82, 0x3f},
+	{0x03, 0xe3, 0x83, 0x00},
+	{0x03, 0xe3, 0x83, 0x06},
+	{0x03, 0xe3, 0x83, 0x05},
+	{0x03, 0xe3, 0x83, 0x0d},
+	{0x03, 0xe3, 0x83, 0x0b},
+	{0x03, 0xe3, 0x83, 0x07},
+	{0x03, 0xe3, 0x83, 0x19},
+	{0x03, 0xe3, 0x83, 0x15},
+	{0x03, 0xe3, 0x83, 0x11},
+	{0x03, 0xe3, 0x83, 0x31},
+	{0x03, 0xe3, 0x83, 0x33},
+	{0x03, 0xe3, 0x83, 0x30},
+	{0x03, 0xe3, 0x83, 0x3e},
+	{0x03, 0xe3, 0x83, 0x32},
+	{0x03, 0xe3, 0x83, 0x36},
+	{0x03, 0xe3, 0x83, 0x2e},
+	{0x03, 0xe3, 0x82, 0x07},
+	{0x03, 0xe3, 0x85, 0x04},
+	{0x03, 0xe3, 0x84, 0x10},
+	{0x03, 0xe3, 0x85, 0x30},
+	{0x03, 0xe3, 0x85, 0x0d},
+	{0x03, 0xe3, 0x85, 0x13},
+	{0x03, 0xe3, 0x85, 0x15},
+	{0x03, 0xe3, 0x85, 0x17},
+	{0x03, 0xe3, 0x85, 0x1f},
+	{0x03, 0xe3, 0x85, 0x1d},
+	{0x03, 0xe3, 0x85, 0x1b},
+	{0x03, 0xe3, 0x85, 0x09},
+	{0x03, 0xe3, 0x85, 0x0f},
+	{0x03, 0xe3, 0x85, 0x0b},
+	{0x03, 0xe3, 0x85, 0x37},
+	{0x03, 0xe3, 0x85, 0x3b},
+	{0x03, 0xe3, 0x85, 0x39},
+	{0x03, 0xe3, 0x85, 0x3f},
+	{0x02, 0xc2, 0x02, 0x00},
+	{0x02, 0xc2, 0x0e, 0x00},
+	{0x02, 0xc2, 0x0c, 0x00},
+	{0x02, 0xc2, 0x00, 0x00},
+	{0x03, 0xe2, 0x82, 0x0f},
+	{0x03, 0xe2, 0x94, 0x2a},
+	{0x03, 0xe2, 0x86, 0x39},
+	{0x03, 0xe2, 0x86, 0x3b},
+	{0x03, 0xe2, 0x86, 0x3f},
+	{0x03, 0xe2, 0x96, 0x0d},
+	{0x03, 0xe2, 0x97, 0x25},
+}
+
+// Total table size 14680 bytes (14KiB)
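
As an editorial aside to the generated tables above (not part of the vendored file): the following minimal Go sketch reproduces the xor decoding described in the inverseData comment, using the shared entry {0x01, 0xE0, 0x00, 0x00} and the mapping U+FF21 -> U+0041 given in that comment.

package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	// Shared inverseData entry for the A..Z mappings: payload length 1,
	// single payload byte 0xE0 (see the comment above inverseData).
	entry := [4]byte{0x01, 0xE0, 0x00, 0x00}

	// U+FF21 FULLWIDTH LATIN CAPITAL LETTER A encodes as EF BC A1 in UTF-8.
	src := []byte("\uFF21")
	last := src[len(src)-1] // 0xA1

	// Xor the last payload byte with the last byte of the source rune and
	// decode the result as UTF-8: 0xE0 ^ 0xA1 = 0x41 = 'A'.
	n := int(entry[0])
	buf := make([]byte, n)
	copy(buf, entry[1:1+n])
	buf[n-1] ^= last
	r, _ := utf8.DecodeRune(buf)
	fmt.Printf("%U -> %U\n", rune(0xFF21), r) // prints: U+FF21 -> U+0041
}

This mirrors what Properties.Folded in width.go (added later in this patch) does with the real trie value.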
diff --git a/vendor/golang.org/x/text/width/transform.go b/vendor/golang.org/x/text/width/transform.go
new file mode 100644
index 00000000..0049f700
--- /dev/null
+++ b/vendor/golang.org/x/text/width/transform.go
@@ -0,0 +1,239 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package width
+
+import (
+	"unicode/utf8"
+
+	"golang.org/x/text/transform"
+)
+
+type foldTransform struct {
+	transform.NopResetter
+}
+
+func (foldTransform) Span(src []byte, atEOF bool) (n int, err error) {
+	for n < len(src) {
+		if src[n] < utf8.RuneSelf {
+			// ASCII fast path.
+			for n++; n < len(src) && src[n] < utf8.RuneSelf; n++ {
+			}
+			continue
+		}
+		v, size := trie.lookup(src[n:])
+		if size == 0 { // incomplete UTF-8 encoding
+			if !atEOF {
+				err = transform.ErrShortSrc
+			} else {
+				n = len(src)
+			}
+			break
+		}
+		if elem(v)&tagNeedsFold != 0 {
+			err = transform.ErrEndOfSpan
+			break
+		}
+		n += size
+	}
+	return n, err
+}
+
+func (foldTransform) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+	for nSrc < len(src) {
+		if src[nSrc] < utf8.RuneSelf {
+			// ASCII fast path.
+			start, end := nSrc, len(src)
+			if d := len(dst) - nDst; d < end-start {
+				end = nSrc + d
+			}
+			for nSrc++; nSrc < end && src[nSrc] < utf8.RuneSelf; nSrc++ {
+			}
+			n := copy(dst[nDst:], src[start:nSrc])
+			if nDst += n; nDst == len(dst) {
+				nSrc = start + n
+				if nSrc == len(src) {
+					return nDst, nSrc, nil
+				}
+				if src[nSrc] < utf8.RuneSelf {
+					return nDst, nSrc, transform.ErrShortDst
+				}
+			}
+			continue
+		}
+		v, size := trie.lookup(src[nSrc:])
+		if size == 0 { // incomplete UTF-8 encoding
+			if !atEOF {
+				return nDst, nSrc, transform.ErrShortSrc
+			}
+			size = 1 // gobble 1 byte
+		}
+		if elem(v)&tagNeedsFold == 0 {
+			if size != copy(dst[nDst:], src[nSrc:nSrc+size]) {
+				return nDst, nSrc, transform.ErrShortDst
+			}
+			nDst += size
+		} else {
+			data := inverseData[byte(v)]
+			if len(dst)-nDst < int(data[0]) {
+				return nDst, nSrc, transform.ErrShortDst
+			}
+			i := 1
+			for end := int(data[0]); i < end; i++ {
+				dst[nDst] = data[i]
+				nDst++
+			}
+			dst[nDst] = data[i] ^ src[nSrc+size-1]
+			nDst++
+		}
+		nSrc += size
+	}
+	return nDst, nSrc, nil
+}
+
+type narrowTransform struct {
+	transform.NopResetter
+}
+
+func (narrowTransform) Span(src []byte, atEOF bool) (n int, err error) {
+	for n < len(src) {
+		if src[n] < utf8.RuneSelf {
+			// ASCII fast path.
+			for n++; n < len(src) && src[n] < utf8.RuneSelf; n++ {
+			}
+			continue
+		}
+		v, size := trie.lookup(src[n:])
+		if size == 0 { // incomplete UTF-8 encoding
+			if !atEOF {
+				err = transform.ErrShortSrc
+			} else {
+				n = len(src)
+			}
+			break
+		}
+		if k := elem(v).kind(); byte(v) == 0 || k != EastAsianFullwidth && k != EastAsianWide && k != EastAsianAmbiguous {
+		} else {
+			err = transform.ErrEndOfSpan
+			break
+		}
+		n += size
+	}
+	return n, err
+}
+
+func (narrowTransform) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+	for nSrc < len(src) {
+		if src[nSrc] < utf8.RuneSelf {
+			// ASCII fast path.
+			start, end := nSrc, len(src)
+			if d := len(dst) - nDst; d < end-start {
+				end = nSrc + d
+			}
+			for nSrc++; nSrc < end && src[nSrc] < utf8.RuneSelf; nSrc++ {
+			}
+			n := copy(dst[nDst:], src[start:nSrc])
+			if nDst += n; nDst == len(dst) {
+				nSrc = start + n
+				if nSrc == len(src) {
+					return nDst, nSrc, nil
+				}
+				if src[nSrc] < utf8.RuneSelf {
+					return nDst, nSrc, transform.ErrShortDst
+				}
+			}
+			continue
+		}
+		v, size := trie.lookup(src[nSrc:])
+		if size == 0 { // incomplete UTF-8 encoding
+			if !atEOF {
+				return nDst, nSrc, transform.ErrShortSrc
+			}
+			size = 1 // gobble 1 byte
+		}
+		if k := elem(v).kind(); byte(v) == 0 || k != EastAsianFullwidth && k != EastAsianWide && k != EastAsianAmbiguous {
+			if size != copy(dst[nDst:], src[nSrc:nSrc+size]) {
+				return nDst, nSrc, transform.ErrShortDst
+			}
+			nDst += size
+		} else {
+			data := inverseData[byte(v)]
+			if len(dst)-nDst < int(data[0]) {
+				return nDst, nSrc, transform.ErrShortDst
+			}
+			i := 1
+			for end := int(data[0]); i < end; i++ {
+				dst[nDst] = data[i]
+				nDst++
+			}
+			dst[nDst] = data[i] ^ src[nSrc+size-1]
+			nDst++
+		}
+		nSrc += size
+	}
+	return nDst, nSrc, nil
+}
+
+type wideTransform struct {
+	transform.NopResetter
+}
+
+func (wideTransform) Span(src []byte, atEOF bool) (n int, err error) {
+	for n < len(src) {
+		// TODO: Consider ASCII fast path. Special-casing ASCII handling can
+		// reduce the ns/op of BenchmarkWideASCII by about 30%. This is probably
+		// not enough to warrant the extra code and complexity.
+		v, size := trie.lookup(src[n:])
+		if size == 0 { // incomplete UTF-8 encoding
+			if !atEOF {
+				err = transform.ErrShortSrc
+			} else {
+				n = len(src)
+			}
+			break
+		}
+		if k := elem(v).kind(); byte(v) == 0 || k != EastAsianHalfwidth && k != EastAsianNarrow {
+		} else {
+			err = transform.ErrEndOfSpan
+			break
+		}
+		n += size
+	}
+	return n, err
+}
+
+func (wideTransform) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+	for nSrc < len(src) {
+		// TODO: Consider ASCII fast path. Special-casing ASCII handling can
+		// reduce the ns/op of BenchmarkWideASCII by about 30%. This is probably
+		// not enough to warrant the extra code and complexity.
+		v, size := trie.lookup(src[nSrc:])
+		if size == 0 { // incomplete UTF-8 encoding
+			if !atEOF {
+				return nDst, nSrc, transform.ErrShortSrc
+			}
+			size = 1 // gobble 1 byte
+		}
+		if k := elem(v).kind(); byte(v) == 0 || k != EastAsianHalfwidth && k != EastAsianNarrow {
+			if size != copy(dst[nDst:], src[nSrc:nSrc+size]) {
+				return nDst, nSrc, transform.ErrShortDst
+			}
+			nDst += size
+		} else {
+			data := inverseData[byte(v)]
+			if len(dst)-nDst < int(data[0]) {
+				return nDst, nSrc, transform.ErrShortDst
+			}
+			i := 1
+			for end := int(data[0]); i < end; i++ {
+				dst[nDst] = data[i]
+				nDst++
+			}
+			dst[nDst] = data[i] ^ src[nSrc+size-1]
+			nDst++
+		}
+		nSrc += size
+	}
+	return nDst, nSrc, nil
+}
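
For context, a minimal usage sketch of the fold, widen, and narrow transforms added above; it is not part of the patch. It goes through the exported Fold, Widen, and Narrow transformers declared in width.go later in this patch, and the expected outputs are the usual fullwidth/halfwidth mappings (shown as hedged comments, not verified against this exact table version).

package main

import (
	"fmt"

	"golang.org/x/text/width"
)

func main() {
	// Fold maps runes to their canonical width (foldTransform above):
	// fullwidth Latin letters and digits become their ASCII forms.
	fmt.Println(width.Fold.String("ＡＢＣ１２３")) // expected: ABC123

	// Widen and Narrow map to the wide and narrow variants where available
	// (wideTransform and narrowTransform above).
	fmt.Println(width.Widen.String("abc"))   // expected: ａｂｃ
	fmt.Println(width.Narrow.String("ａｂｃ")) // expected: abc
}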
diff --git a/vendor/golang.org/x/text/width/trieval.go b/vendor/golang.org/x/text/width/trieval.go
new file mode 100644
index 00000000..ca8e45fd
--- /dev/null
+++ b/vendor/golang.org/x/text/width/trieval.go
@@ -0,0 +1,30 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+package width
+
+// elem is an entry of the width trie. The high byte is used to encode the type
+// of the rune. The low byte is used to store the index to a mapping entry in
+// the inverseData array.
+type elem uint16
+
+const (
+	tagNeutral elem = iota << typeShift
+	tagAmbiguous
+	tagWide
+	tagNarrow
+	tagFullwidth
+	tagHalfwidth
+)
+
+const (
+	numTypeBits = 3
+	typeShift   = 16 - numTypeBits
+
+	// tagNeedsFold is true for all fullwidth and halfwidth runes except for
+	// the Won sign U+20A9.
+	tagNeedsFold = 0x1000
+
+	// The Korean Won sign is halfwidth, but SHOULD NOT be mapped to a wide
+	// variant.
+	wonSign rune = 0x20A9
+)
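
A small sketch (again, not part of the vendored file) showing how a trie value decomposes under the constants defined in trieval.go above: the top numTypeBits bits carry the kind tag, bit 0x1000 marks runes that need folding, and the low byte indexes inverseData. The value 0x9012 is taken from the generated table earlier in this patch.

package main

import "fmt"

func main() {
	// Mirrors trieval.go: 3 type bits in the high end, a fold flag, and an
	// inverseData index in the low byte.
	const (
		numTypeBits  = 3
		typeShift    = 16 - numTypeBits // 13
		tagNeedsFold = 0x1000
	)

	v := uint16(0x9012) // a value that appears in the generated width table

	fmt.Println("kind tag:", v>>typeShift)            // 4 -> fullwidth
	fmt.Println("needs fold:", v&tagNeedsFold != 0)   // true
	fmt.Printf("inverseData index: %#02x\n", byte(v)) // 0x12
}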
diff --git a/vendor/golang.org/x/text/width/width.go b/vendor/golang.org/x/text/width/width.go
new file mode 100644
index 00000000..29c7509b
--- /dev/null
+++ b/vendor/golang.org/x/text/width/width.go
@@ -0,0 +1,206 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate stringer -type=Kind
+//go:generate go run gen.go gen_common.go gen_trieval.go
+
+// Package width provides functionality for handling different widths in text.
+//
+// Wide characters behave like ideographs; they tend to allow line breaks after
+// each character and remain upright in vertical text layout. Narrow characters
+// are kept together in words or runs that are rotated sideways in vertical text
+// layout.
+//
+// For more information, see https://unicode.org/reports/tr11/.
+package width // import "golang.org/x/text/width"
+
+import (
+	"unicode/utf8"
+
+	"golang.org/x/text/transform"
+)
+
+// TODO
+// 1) Reduce table size by compressing blocks.
+// 2) API proposition for computing display length
+//    (approximation, fixed pitch only).
+// 3) Implement display length.
+
+// Kind indicates the type of width property as defined in https://unicode.org/reports/tr11/.
+type Kind int
+
+const (
+	// Neutral characters do not occur in legacy East Asian character sets.
+	Neutral Kind = iota
+
+	// EastAsianAmbiguous characters are sometimes wide and sometimes narrow
+	// and require additional information not contained in the character code
+	// to further resolve their width.
+	EastAsianAmbiguous
+
+	// EastAsianWide characters are wide in their usual form. They occur only in
+	// the context of East Asian typography. These runes may have explicit
+	// halfwidth counterparts.
+	EastAsianWide
+
+	// EastAsianNarrow characters are narrow in their usual form. They often have
+	// fullwidth counterparts.
+	EastAsianNarrow
+
+	// Note: there exist Narrow runes that do not have fullwidth or wide
+	// counterparts, despite what the definition says (e.g. U+27E6).
+
+	// EastAsianFullwidth characters have a compatibility decomposition of type
+	// wide that maps to a narrow counterpart.
+	EastAsianFullwidth
+
+	// EastAsianHalfwidth characters have a compatibility decomposition of type
+	// narrow that maps to a wide or ambiguous counterpart, plus U+20A9 ₩ WON
+	// SIGN.
+	EastAsianHalfwidth
+
+	// Note: there exist runes that have halfwidth counterparts but that are
+	// classified as Ambiguous, rather than wide (e.g. U+2190).
+)
+
+// TODO: the generated tries need to return size 1 for invalid runes for the
+// width to be computed correctly (each byte should render width 1)
+
+var trie = newWidthTrie(0)
+
+// Lookup reports the Properties of the first rune in b and the number of bytes
+// of its UTF-8 encoding.
+func Lookup(b []byte) (p Properties, size int) {
+	v, sz := trie.lookup(b)
+	return Properties{elem(v), b[sz-1]}, sz
+}
+
+// LookupString reports the Properties of the first rune in s and the number of
+// bytes of its UTF-8 encoding.
+func LookupString(s string) (p Properties, size int) {
+	v, sz := trie.lookupString(s)
+	return Properties{elem(v), s[sz-1]}, sz
+}
+
+// LookupRune reports the Properties of rune r.
+func LookupRune(r rune) Properties {
+	var buf [4]byte
+	n := utf8.EncodeRune(buf[:], r)
+	v, _ := trie.lookup(buf[:n])
+	last := byte(r)
+	if r >= utf8.RuneSelf {
+		last = 0x80 + byte(r&0x3f)
+	}
+	return Properties{elem(v), last}
+}
+
+// Properties provides access to width properties of a rune.
+type Properties struct {
+	elem elem
+	last byte
+}
+
+func (e elem) kind() Kind {
+	return Kind(e >> typeShift)
+}
+
+// Kind returns the Kind of a rune as defined in Unicode TR #11.
+// See https://unicode.org/reports/tr11/ for more details.
+func (p Properties) Kind() Kind {
+	return p.elem.kind()
+}
+
+// Folded returns the folded variant of a rune or 0 if the rune is canonical.
+func (p Properties) Folded() rune {
+	if p.elem&tagNeedsFold != 0 {
+		buf := inverseData[byte(p.elem)]
+		buf[buf[0]] ^= p.last
+		r, _ := utf8.DecodeRune(buf[1 : 1+buf[0]])
+		return r
+	}
+	return 0
+}
+
+// Narrow returns the narrow variant of a rune or 0 if the rune is already
+// narrow or doesn't have a narrow variant.
+func (p Properties) Narrow() rune {
+	if k := p.elem.kind(); byte(p.elem) != 0 && (k == EastAsianFullwidth || k == EastAsianWide || k == EastAsianAmbiguous) {
+		buf := inverseData[byte(p.elem)]
+		buf[buf[0]] ^= p.last
+		r, _ := utf8.DecodeRune(buf[1 : 1+buf[0]])
+		return r
+	}
+	return 0
+}
+
+// Wide returns the wide variant of a rune or 0 if the rune is already
+// wide or doesn't have a wide variant.
+func (p Properties) Wide() rune {
+	if k := p.elem.kind(); byte(p.elem) != 0 && (k == EastAsianHalfwidth || k == EastAsianNarrow) {
+		buf := inverseData[byte(p.elem)]
+		buf[buf[0]] ^= p.last
+		r, _ := utf8.DecodeRune(buf[1 : 1+buf[0]])
+		return r
+	}
+	return 0
+}
+
+// TODO for Properties:
+// - Add Fullwidth/Halfwidth or Inverted methods for computing variants
+// mapping.
+// - Add width information (including information on non-spacing runes).
+
+// Transformer implements the transform.Transformer interface.
+type Transformer struct {
+	t transform.SpanningTransformer
+}
+
+// Reset implements the transform.Transformer interface.
+func (t Transformer) Reset() { t.t.Reset() }
+
+// Transform implements the transform.Transformer interface.
+func (t Transformer) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+	return t.t.Transform(dst, src, atEOF)
+}
+
+// Span implements the transform.SpanningTransformer interface.
+func (t Transformer) Span(src []byte, atEOF bool) (n int, err error) {
+	return t.t.Span(src, atEOF)
+}
+
+// Bytes returns a new byte slice with the result of applying t to b.
+func (t Transformer) Bytes(b []byte) []byte {
+	b, _, _ = transform.Bytes(t, b)
+	return b
+}
+
+// String returns a string with the result of applying t to s.
+func (t Transformer) String(s string) string {
+	s, _, _ = transform.String(t, s)
+	return s
+}
+
+var (
+	// Fold is a transform that maps all runes to their canonical width.
+	//
+	// Note that the NFKC and NFKD transforms in golang.org/x/text/unicode/norm
+	// provide a more generic folding mechanism.
+	Fold Transformer = Transformer{foldTransform{}}
+
+	// Widen is a transform that maps runes to their wide variant, if
+	// available.
+	Widen Transformer = Transformer{wideTransform{}}
+
+	// Narrow is a transform that maps runes to their narrow variant, if
+	// available.
+	Narrow Transformer = Transformer{narrowTransform{}}
+)
+
+// TODO: Consider the following options:
+// - Treat Ambiguous runes that have a halfwidth counterpart as wide, or some
+//   generalized variant of this.
+// - Consider a wide Won character to be the default width (or some generalized
+//   variant of this).
+// - Filter the set of characters that gets converted (the preferred approach is
+//   to allow applying filters to transforms).
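The comments above describe the public surface of the width package; what follows is a minimal, illustrative usage sketch (a separate program, assuming only the Lookup*, Properties, and Fold/Widen/Narrow APIs vendored above, with expected output noted in comments per the package documentation):

package main

import (
	"fmt"

	"golang.org/x/text/width"
)

func main() {
	// Inspect the East Asian Width property of a single rune.
	p := width.LookupRune('Ａ') // U+FF21 FULLWIDTH LATIN CAPITAL LETTER A
	fmt.Println(p.Kind())          // EastAsianFullwidth
	fmt.Printf("%c\n", p.Narrow()) // A

	// The package-level transformers rewrite whole strings between variants.
	fmt.Println(width.Fold.String("ｶﾀｶﾅ"))   // カタカナ (canonical width)
	fmt.Println(width.Narrow.String("Ａｂｃ")) // Abc
	fmt.Println(width.Widen.String("abc"))   // ａｂｃ
}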
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
index b96b3597..eae18e18 100644
--- a/vendor/google.golang.org/grpc/internal/internal.go
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -60,7 +60,7 @@ var (
 //
 // The health checking protocol is defined at:
 // https://github.com/grpc/grpc/blob/master/doc/health-checking.md
-type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State), serviceName string) error
+type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), serviceName string) error
 
 const (
 	// CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode.
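A hedged sketch of a function conforming to the updated HealthChecker signature, showing that setConnectivityState now also carries the error behind a state change; the Watch method name is an assumption for illustration, not taken from this patch:

package example

import (
	"context"

	"google.golang.org/grpc/connectivity"
)

// exampleHealthChecker matches the updated HealthChecker signature. It reports
// Ready on success and TransientFailure together with the causing error, which
// is the new second argument to setConnectivityState.
func exampleHealthChecker(ctx context.Context,
	newStream func(string) (interface{}, error),
	setConnectivityState func(connectivity.State, error),
	serviceName string) error {
	// "/grpc.health.v1.Health/Watch" is assumed here purely for illustration.
	if _, err := newStream("/grpc.health.v1.Health/Watch"); err != nil {
		setConnectivityState(connectivity.TransientFailure, err)
		return err
	}
	setConnectivityState(connectivity.Ready, nil)
	return nil
}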
diff --git a/vendor/gopkg.in/yaml.v2/.travis.yml b/vendor/gopkg.in/yaml.v2/.travis.yml
index 9f556934..055480b9 100644
--- a/vendor/gopkg.in/yaml.v2/.travis.yml
+++ b/vendor/gopkg.in/yaml.v2/.travis.yml
@@ -1,12 +1,16 @@
 language: go
 
 go:
-    - 1.4
-    - 1.5
-    - 1.6
-    - 1.7
-    - 1.8
-    - 1.9
-    - tip
+    - "1.4.x"
+    - "1.5.x"
+    - "1.6.x"
+    - "1.7.x"
+    - "1.8.x"
+    - "1.9.x"
+    - "1.10.x"
+    - "1.11.x"
+    - "1.12.x"
+    - "1.13.x"
+    - "tip"
 
 go_import_path: gopkg.in/yaml.v2
diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go
index 570b8ecd..0b9bb603 100644
--- a/vendor/gopkg.in/yaml.v2/scannerc.go
+++ b/vendor/gopkg.in/yaml.v2/scannerc.go
@@ -626,30 +626,17 @@ func trace(args ...interface{}) func() {
 func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
 	// While we need more tokens to fetch, do it.
 	for {
-		// Check if we really need to fetch more tokens.
-		need_more_tokens := false
-
-		if parser.tokens_head == len(parser.tokens) {
-			// Queue is empty.
-			need_more_tokens = true
-		} else {
-			// Check if any potential simple key may occupy the head position.
-			if !yaml_parser_stale_simple_keys(parser) {
+		if parser.tokens_head != len(parser.tokens) {
+			// If queue is non-empty, check if any potential simple key may
+			// occupy the head position.
+			head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed]
+			if !ok {
+				break
+			} else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok {
 				return false
+			} else if !valid {
+				break
 			}
-
-			for i := range parser.simple_keys {
-				simple_key := &parser.simple_keys[i]
-				if simple_key.possible && simple_key.token_number == parser.tokens_parsed {
-					need_more_tokens = true
-					break
-				}
-			}
-		}
-
-		// We are finished.
-		if !need_more_tokens {
-			break
 		}
 		// Fetch the next token.
 		if !yaml_parser_fetch_next_token(parser) {
@@ -678,11 +665,6 @@ func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool {
 		return false
 	}
 
-	// Remove obsolete potential simple keys.
-	if !yaml_parser_stale_simple_keys(parser) {
-		return false
-	}
-
 	// Check the indentation level against the current column.
 	if !yaml_parser_unroll_indent(parser, parser.mark.column) {
 		return false
@@ -837,29 +819,30 @@ func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool {
 		"found character that cannot start any token")
 }
 
-// Check the list of potential simple keys and remove the positions that
-// cannot contain simple keys anymore.
-func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool {
-	// Check for a potential simple key for each flow level.
-	for i := range parser.simple_keys {
-		simple_key := &parser.simple_keys[i]
-
-		// The specification requires that a simple key
-		//
-		//  - is limited to a single line,
-		//  - is shorter than 1024 characters.
-		if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) {
-
-			// Check if the potential simple key to be removed is required.
-			if simple_key.required {
-				return yaml_parser_set_scanner_error(parser,
-					"while scanning a simple key", simple_key.mark,
-					"could not find expected ':'")
-			}
-			simple_key.possible = false
-		}
+func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) {
+	if !simple_key.possible {
+		return false, true
 	}
-	return true
+
+	// The 1.2 specification says:
+	//
+	//     "If the ? indicator is omitted, parsing needs to see past the
+	//     implicit key to recognize it as such. To limit the amount of
+	//     lookahead required, the “:” indicator must appear at most 1024
+	//     Unicode characters beyond the start of the key. In addition, the key
+	//     is restricted to a single line."
+	//
+	if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index {
+		// Check if the potential simple key to be removed is required.
+		if simple_key.required {
+			return false, yaml_parser_set_scanner_error(parser,
+				"while scanning a simple key", simple_key.mark,
+				"could not find expected ':'")
+		}
+		simple_key.possible = false
+		return false, true
+	}
+	return true, true
 }
 
 // Check if a simple key may start at the current position and add it if
@@ -879,13 +862,14 @@ func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
 			possible:     true,
 			required:     required,
 			token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+			mark:         parser.mark,
 		}
-		simple_key.mark = parser.mark
 
 		if !yaml_parser_remove_simple_key(parser) {
 			return false
 		}
 		parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+		parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1
 	}
 	return true
 }
@@ -900,9 +884,10 @@ func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
 				"while scanning a simple key", parser.simple_keys[i].mark,
 				"could not find expected ':'")
 		}
+		// Remove the key from the stack.
+		parser.simple_keys[i].possible = false
+		delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number)
 	}
-	// Remove the key from the stack.
-	parser.simple_keys[i].possible = false
 	return true
 }
 
@@ -912,7 +897,12 @@ const max_flow_level = 10000
 // Increase the flow level and resize the simple key list if needed.
 func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
 	// Reset the simple key on the next level.
-	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{
+		possible:     false,
+		required:     false,
+		token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+		mark:         parser.mark,
+	})
 
 	// Increase the flow level.
 	parser.flow_level++
@@ -928,7 +918,9 @@ func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
 func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
 	if parser.flow_level > 0 {
 		parser.flow_level--
-		parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1]
+		last := len(parser.simple_keys) - 1
+		delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number)
+		parser.simple_keys = parser.simple_keys[:last]
 	}
 	return true
 }
@@ -1005,6 +997,8 @@ func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
 	// Initialize the simple key stack.
 	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
 
+	parser.simple_keys_by_tok = make(map[int]int)
+
 	// A simple key is allowed at the beginning of the stream.
 	parser.simple_key_allowed = true
 
@@ -1286,7 +1280,11 @@ func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
 	simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
 
 	// Have we found a simple key?
-	if simple_key.possible {
+	if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok {
+		return false
+
+	} else if valid {
+
 		// Create the KEY token and insert it into the queue.
 		token := yaml_token_t{
 			typ:        yaml_KEY_TOKEN,
@@ -1304,6 +1302,7 @@ func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
 
 		// Remove the simple key.
 		simple_key.possible = false
+		delete(parser.simple_keys_by_tok, simple_key.token_number)
 
 		// A simple key cannot follow another simple key.
 		parser.simple_key_allowed = false
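The 1024-character, single-line restriction quoted above, now enforced by yaml_simple_key_is_valid, is observable from the public API; a small illustrative sketch (assuming the vendored gopkg.in/yaml.v2 above, expected results noted in comments):

package main

import (
	"fmt"
	"strings"

	"gopkg.in/yaml.v2"
)

func main() {
	var out map[string]string

	// A short implicit key parses normally.
	fmt.Println(yaml.Unmarshal([]byte("short: value"), &out)) // <nil>

	// An implicit key whose ':' sits more than 1024 characters past the key
	// start exceeds the lookahead limit quoted above and is rejected.
	long := strings.Repeat("k", 2000) + ": value"
	fmt.Println(yaml.Unmarshal([]byte(long), &out)) // non-nil scanner error
}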
diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go
index de85aa4c..89650e29 100644
--- a/vendor/gopkg.in/yaml.v2/yaml.go
+++ b/vendor/gopkg.in/yaml.v2/yaml.go
@@ -89,7 +89,7 @@ func UnmarshalStrict(in []byte, out interface{}) (err error) {
 	return unmarshal(in, out, true)
 }
 
-// A Decorder reads and decodes YAML values from an input stream.
+// A Decoder reads and decodes YAML values from an input stream.
 type Decoder struct {
 	strict bool
 	parser *parser
diff --git a/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/gopkg.in/yaml.v2/yamlh.go
index e25cee56..f6a9c8e3 100644
--- a/vendor/gopkg.in/yaml.v2/yamlh.go
+++ b/vendor/gopkg.in/yaml.v2/yamlh.go
@@ -579,6 +579,7 @@ type yaml_parser_t struct {
 
 	simple_key_allowed bool                // May a simple key occur at the current position?
 	simple_keys        []yaml_simple_key_t // The stack of simple keys.
+	simple_keys_by_tok map[int]int         // possible simple_key indexes indexed by token_number
 
 	// Parser stuff
 
diff --git a/vendor/k8s.io/api/admission/v1/doc.go b/vendor/k8s.io/api/admission/v1/doc.go
new file mode 100644
index 00000000..cbc6bb59
--- /dev/null
+++ b/vendor/k8s.io/api/admission/v1/doc.go
@@ -0,0 +1,23 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:protobuf-gen=package
+// +k8s:openapi-gen=false
+
+// +groupName=admission.k8s.io
+
+package v1 // import "k8s.io/api/admission/v1"
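The admission/v1 types generated in the next file (AdmissionRequest, AdmissionResponse, AdmissionReview) are typically consumed by webhook servers; a hypothetical handler sketch follows, where the route, port, and plain-HTTP serving are illustrative simplifications and only the field names come from the generated types:

package main

import (
	"encoding/json"
	"net/http"

	admissionv1 "k8s.io/api/admission/v1"
)

// serveReview decodes an AdmissionReview, allows the operation, and echoes the
// request UID back in the response, as admission webhooks are required to do.
func serveReview(w http.ResponseWriter, r *http.Request) {
	var review admissionv1.AdmissionReview
	if err := json.NewDecoder(r.Body).Decode(&review); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	review.Response = &admissionv1.AdmissionResponse{
		UID:     review.Request.UID,
		Allowed: true,
	}
	_ = json.NewEncoder(w).Encode(&review)
}

func main() {
	// Plain HTTP on an arbitrary port for brevity; real admission webhooks
	// must serve TLS.
	http.HandleFunc("/validate", serveReview)
	_ = http.ListenAndServe(":8080", nil)
}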
diff --git a/vendor/k8s.io/api/admission/v1/generated.pb.go b/vendor/k8s.io/api/admission/v1/generated.pb.go
new file mode 100644
index 00000000..ed5b5dfe
--- /dev/null
+++ b/vendor/k8s.io/api/admission/v1/generated.pb.go
@@ -0,0 +1,1743 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: k8s.io/kubernetes/vendor/k8s.io/api/admission/v1/generated.proto
+
+package v1
+
+import (
+	fmt "fmt"
+
+	io "io"
+
+	proto "github.com/gogo/protobuf/proto"
+	github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	math "math"
+	math_bits "math/bits"
+	reflect "reflect"
+	strings "strings"
+
+	k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+func (m *AdmissionRequest) Reset()      { *m = AdmissionRequest{} }
+func (*AdmissionRequest) ProtoMessage() {}
+func (*AdmissionRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4b73421fd5edef9f, []int{0}
+}
+func (m *AdmissionRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AdmissionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *AdmissionRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AdmissionRequest.Merge(m, src)
+}
+func (m *AdmissionRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AdmissionRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AdmissionRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AdmissionRequest proto.InternalMessageInfo
+
+func (m *AdmissionResponse) Reset()      { *m = AdmissionResponse{} }
+func (*AdmissionResponse) ProtoMessage() {}
+func (*AdmissionResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4b73421fd5edef9f, []int{1}
+}
+func (m *AdmissionResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AdmissionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *AdmissionResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AdmissionResponse.Merge(m, src)
+}
+func (m *AdmissionResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AdmissionResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AdmissionResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AdmissionResponse proto.InternalMessageInfo
+
+func (m *AdmissionReview) Reset()      { *m = AdmissionReview{} }
+func (*AdmissionReview) ProtoMessage() {}
+func (*AdmissionReview) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4b73421fd5edef9f, []int{2}
+}
+func (m *AdmissionReview) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AdmissionReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *AdmissionReview) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AdmissionReview.Merge(m, src)
+}
+func (m *AdmissionReview) XXX_Size() int {
+	return m.Size()
+}
+func (m *AdmissionReview) XXX_DiscardUnknown() {
+	xxx_messageInfo_AdmissionReview.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AdmissionReview proto.InternalMessageInfo
+
+func init() {
+	proto.RegisterType((*AdmissionRequest)(nil), "k8s.io.api.admission.v1.AdmissionRequest")
+	proto.RegisterType((*AdmissionResponse)(nil), "k8s.io.api.admission.v1.AdmissionResponse")
+	proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.admission.v1.AdmissionResponse.AuditAnnotationsEntry")
+	proto.RegisterType((*AdmissionReview)(nil), "k8s.io.api.admission.v1.AdmissionReview")
+}
+
+func init() {
+	proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admission/v1/generated.proto", fileDescriptor_4b73421fd5edef9f)
+}
+
+var fileDescriptor_4b73421fd5edef9f = []byte{
+	// 898 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0x4d, 0x6f, 0x1b, 0x45,
+	0x18, 0xf6, 0xc6, 0x89, 0xed, 0x1d, 0x87, 0xda, 0x9d, 0x82, 0x58, 0xf9, 0xb0, 0x36, 0x39, 0x20,
+	0x17, 0xb5, 0xb3, 0x24, 0x82, 0x2a, 0xaa, 0x38, 0x34, 0x4b, 0x2a, 0x14, 0x90, 0x9a, 0x68, 0xda,
+	0xa0, 0x8a, 0x03, 0xd2, 0xd8, 0x3b, 0xb5, 0x17, 0xdb, 0x33, 0xcb, 0xce, 0xac, 0x83, 0x6f, 0x9c,
+	0x38, 0xf3, 0x0f, 0xf8, 0x1d, 0xfc, 0x83, 0x1c, 0x7b, 0xec, 0xc9, 0x22, 0xe6, 0x5f, 0x44, 0x42,
+	0x42, 0x33, 0x3b, 0xfb, 0xd1, 0x7c, 0x88, 0xd0, 0xf4, 0xe4, 0x79, 0x3f, 0x9e, 0xe7, 0x7d, 0xfd,
+	0xbc, 0x3b, 0xef, 0x80, 0x27, 0x93, 0x5d, 0x81, 0x42, 0xee, 0x4d, 0x92, 0x01, 0x8d, 0x19, 0x95,
+	0x54, 0x78, 0x73, 0xca, 0x02, 0x1e, 0x7b, 0x26, 0x40, 0xa2, 0xd0, 0x23, 0xc1, 0x2c, 0x14, 0x22,
+	0xe4, 0xcc, 0x9b, 0x6f, 0x7b, 0x23, 0xca, 0x68, 0x4c, 0x24, 0x0d, 0x50, 0x14, 0x73, 0xc9, 0xe1,
+	0xc7, 0x69, 0x22, 0x22, 0x51, 0x88, 0xf2, 0x44, 0x34, 0xdf, 0xee, 0x3c, 0x1c, 0x85, 0x72, 0x9c,
+	0x0c, 0xd0, 0x90, 0xcf, 0xbc, 0x11, 0x1f, 0x71, 0x4f, 0xe7, 0x0f, 0x92, 0x57, 0xda, 0xd2, 0x86,
+	0x3e, 0xa5, 0x3c, 0x9d, 0x07, 0xe5, 0x82, 0x89, 0x1c, 0x53, 0x26, 0xc3, 0x21, 0x91, 0x57, 0x57,
+	0xed, 0x7c, 0x51, 0x64, 0xcf, 0xc8, 0x70, 0x1c, 0x32, 0x1a, 0x2f, 0xbc, 0x68, 0x32, 0x52, 0x0e,
+	0xe1, 0xcd, 0xa8, 0x24, 0x57, 0xa1, 0xbc, 0xeb, 0x50, 0x71, 0xc2, 0x64, 0x38, 0xa3, 0x97, 0x00,
+	0x8f, 0xfe, 0x0b, 0x20, 0x86, 0x63, 0x3a, 0x23, 0x17, 0x71, 0x5b, 0x7f, 0xd8, 0xa0, 0xbd, 0x97,
+	0x89, 0x81, 0xe9, 0xcf, 0x09, 0x15, 0x12, 0xfa, 0xa0, 0x9a, 0x84, 0x81, 0x63, 0xf5, 0xac, 0xbe,
+	0xed, 0x7f, 0x7e, 0xba, 0xec, 0x56, 0x56, 0xcb, 0x6e, 0xf5, 0xf8, 0x60, 0xff, 0x7c, 0xd9, 0xfd,
+	0xe4, 0xba, 0x42, 0x72, 0x11, 0x51, 0x81, 0x8e, 0x0f, 0xf6, 0xb1, 0x02, 0xc3, 0x97, 0x60, 0x7d,
+	0x12, 0xb2, 0xc0, 0x59, 0xeb, 0x59, 0xfd, 0xe6, 0xce, 0x23, 0x54, 0x88, 0x9f, 0xc3, 0x50, 0x34,
+	0x19, 0x29, 0x87, 0x40, 0x4a, 0x06, 0x34, 0xdf, 0x46, 0xdf, 0xc4, 0x3c, 0x89, 0xbe, 0xa7, 0xb1,
+	0x6a, 0xe6, 0xbb, 0x90, 0x05, 0xfe, 0xa6, 0x29, 0xbe, 0xae, 0x2c, 0xac, 0x19, 0xe1, 0x18, 0x34,
+	0x62, 0x2a, 0x78, 0x12, 0x0f, 0xa9, 0x53, 0xd5, 0xec, 0x8f, 0xff, 0x3f, 0x3b, 0x36, 0x0c, 0x7e,
+	0xdb, 0x54, 0x68, 0x64, 0x1e, 0x9c, 0xb3, 0xc3, 0x2f, 0x41, 0x53, 0x24, 0x83, 0x2c, 0xe0, 0xac,
+	0x6b, 0x3d, 0xee, 0x19, 0x40, 0xf3, 0x79, 0x11, 0xc2, 0xe5, 0x3c, 0x18, 0x82, 0x66, 0x9c, 0x2a,
+	0xa9, 0xba, 0x76, 0x3e, 0xb8, 0x95, 0x02, 0x2d, 0x55, 0x0a, 0x17, 0x74, 0xb8, 0xcc, 0x0d, 0x17,
+	0xa0, 0x65, 0xcc, 0xbc, 0xcb, 0x3b, 0xb7, 0x96, 0xe4, 0xde, 0x6a, 0xd9, 0x6d, 0xe1, 0xb7, 0x69,
+	0xf1, 0xc5, 0x3a, 0xf0, 0x5b, 0x00, 0x8d, 0xab, 0x24, 0x84, 0xd3, 0xd2, 0x1a, 0x75, 0x8c, 0x46,
+	0x10, 0x5f, 0xca, 0xc0, 0x57, 0xa0, 0x60, 0x0f, 0xac, 0x33, 0x32, 0xa3, 0xce, 0x86, 0x46, 0xe7,
+	0x43, 0x7f, 0x46, 0x66, 0x14, 0xeb, 0x08, 0xf4, 0x80, 0xad, 0x7e, 0x45, 0x44, 0x86, 0xd4, 0xa9,
+	0xe9, 0xb4, 0xbb, 0x26, 0xcd, 0x7e, 0x96, 0x05, 0x70, 0x91, 0x03, 0xbf, 0x02, 0x36, 0x8f, 0xd4,
+	0xa7, 0x1e, 0x72, 0xe6, 0xd4, 0x35, 0xc0, 0xcd, 0x00, 0x87, 0x59, 0xe0, 0xbc, 0x6c, 0xe0, 0x02,
+	0x00, 0x5f, 0x80, 0x46, 0x22, 0x68, 0x7c, 0xc0, 0x5e, 0x71, 0xa7, 0xa1, 0x05, 0xfd, 0x14, 0x95,
+	0xd7, 0xc7, 0x5b, 0xd7, 0x5e, 0x09, 0x79, 0x6c, 0xb2, 0x8b, 0xef, 0x29, 0xf3, 0xe0, 0x9c, 0x09,
+	0x1e, 0x83, 0x1a, 0x1f, 0xfc, 0x44, 0x87, 0xd2, 0xb1, 0x35, 0xe7, 0xc3, 0x6b, 0x87, 0x64, 0x6e,
+	0x2d, 0xc2, 0xe4, 0xe4, 0xe9, 0x2f, 0x92, 0x32, 0x35, 0x1f, 0xff, 0x8e, 0xa1, 0xae, 0x1d, 0x6a,
+	0x12, 0x6c, 0xc8, 0xe0, 0x8f, 0xc0, 0xe6, 0xd3, 0x20, 0x75, 0x3a, 0xe0, 0x5d, 0x98, 0x73, 0x29,
+	0x0f, 0x33, 0x1e, 0x5c, 0x50, 0xc2, 0x2d, 0x50, 0x0b, 0xe2, 0x05, 0x4e, 0x98, 0xd3, 0xec, 0x59,
+	0xfd, 0x86, 0x0f, 0x54, 0x0f, 0xfb, 0xda, 0x83, 0x4d, 0x04, 0xbe, 0x04, 0x75, 0x1e, 0x29, 0x31,
+	0x84, 0xb3, 0xf9, 0x2e, 0x1d, 0xb4, 0x4c, 0x07, 0xf5, 0xc3, 0x94, 0x05, 0x67, 0x74, 0x5b, 0xff,
+	0x54, 0xc1, 0xdd, 0xd2, 0x86, 0x12, 0x11, 0x67, 0x82, 0xbe, 0x97, 0x15, 0x75, 0x1f, 0xd4, 0xc9,
+	0x74, 0xca, 0x4f, 0x68, 0xba, 0xa5, 0x1a, 0x45, 0x13, 0x7b, 0xa9, 0x1b, 0x67, 0x71, 0x78, 0x04,
+	0x6a, 0x42, 0x12, 0x99, 0x08, 0xb3, 0x71, 0x1e, 0xdc, 0xec, 0x7a, 0x3d, 0xd7, 0x98, 0x54, 0x30,
+	0x4c, 0x45, 0x32, 0x95, 0xd8, 0xf0, 0xc0, 0x2e, 0xd8, 0x88, 0x88, 0x1c, 0x8e, 0xf5, 0x56, 0xd9,
+	0xf4, 0xed, 0xd5, 0xb2, 0xbb, 0x71, 0xa4, 0x1c, 0x38, 0xf5, 0xc3, 0x5d, 0x60, 0xeb, 0xc3, 0x8b,
+	0x45, 0x94, 0x5d, 0x8c, 0x8e, 0x1a, 0xd1, 0x51, 0xe6, 0x3c, 0x2f, 0x1b, 0xb8, 0x48, 0x86, 0xbf,
+	0x59, 0xa0, 0x4d, 0x92, 0x20, 0x94, 0x7b, 0x8c, 0x71, 0x49, 0xd2, 0xa9, 0xd4, 0x7a, 0xd5, 0x7e,
+	0x73, 0xe7, 0x09, 0xba, 0xe6, 0x11, 0x44, 0x97, 0x24, 0x46, 0x7b, 0x17, 0x28, 0x9e, 0x32, 0x19,
+	0x2f, 0x7c, 0xc7, 0x68, 0xd4, 0xbe, 0x18, 0xc6, 0x97, 0x6a, 0x76, 0xbe, 0x06, 0x1f, 0x5d, 0x49,
+	0x02, 0xdb, 0xa0, 0x3a, 0xa1, 0x8b, 0x74, 0x7a, 0x58, 0x1d, 0xe1, 0x87, 0x60, 0x63, 0x4e, 0xa6,
+	0x09, 0xd5, 0x93, 0xb0, 0x71, 0x6a, 0x3c, 0x5e, 0xdb, 0xb5, 0xb6, 0xfe, 0xb4, 0x40, 0xab, 0xd4,
+	0xdc, 0x3c, 0xa4, 0x27, 0xf0, 0x08, 0xd4, 0xcd, 0x16, 0xd1, 0x1c, 0xcd, 0x9d, 0xfb, 0x37, 0xf9,
+	0x5f, 0x1a, 0xe0, 0x37, 0xd5, 0x80, 0xb3, 0xed, 0x96, 0xd1, 0xa8, 0x0b, 0x1f, 0x9b, 0x3f, 0x6e,
+	0x9e, 0xac, 0xcf, 0x6e, 0x2e, 0x95, 0xbf, 0x69, 0x1e, 0x10, 0x6d, 0xe1, 0x9c, 0xc9, 0xef, 0x9f,
+	0x9e, 0xb9, 0x95, 0xd7, 0x67, 0x6e, 0xe5, 0xcd, 0x99, 0x5b, 0xf9, 0x75, 0xe5, 0x5a, 0xa7, 0x2b,
+	0xd7, 0x7a, 0xbd, 0x72, 0xad, 0x37, 0x2b, 0xd7, 0xfa, 0x6b, 0xe5, 0x5a, 0xbf, 0xff, 0xed, 0x56,
+	0x7e, 0x58, 0x9b, 0x6f, 0xff, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x37, 0xc7, 0x3f, 0x71, 0xdf, 0x08,
+	0x00, 0x00,
+}
+
+func (m *AdmissionRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AdmissionRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AdmissionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.RequestSubResource)
+	copy(dAtA[i:], m.RequestSubResource)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequestSubResource)))
+	i--
+	dAtA[i] = 0x7a
+	if m.RequestResource != nil {
+		{
+			size, err := m.RequestResource.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x72
+	}
+	if m.RequestKind != nil {
+		{
+			size, err := m.RequestKind.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x6a
+	}
+	{
+		size, err := m.Options.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x62
+	if m.DryRun != nil {
+		i--
+		if *m.DryRun {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x58
+	}
+	{
+		size, err := m.OldObject.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x52
+	{
+		size, err := m.Object.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x4a
+	{
+		size, err := m.UserInfo.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x42
+	i -= len(m.Operation)
+	copy(dAtA[i:], m.Operation)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operation)))
+	i--
+	dAtA[i] = 0x3a
+	i -= len(m.Namespace)
+	copy(dAtA[i:], m.Namespace)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
+	i--
+	dAtA[i] = 0x32
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0x2a
+	i -= len(m.SubResource)
+	copy(dAtA[i:], m.SubResource)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubResource)))
+	i--
+	dAtA[i] = 0x22
+	{
+		size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	{
+		size, err := m.Kind.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.UID)
+	copy(dAtA[i:], m.UID)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *AdmissionResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AdmissionResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AdmissionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.AuditAnnotations) > 0 {
+		keysForAuditAnnotations := make([]string, 0, len(m.AuditAnnotations))
+		for k := range m.AuditAnnotations {
+			keysForAuditAnnotations = append(keysForAuditAnnotations, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations)
+		for iNdEx := len(keysForAuditAnnotations) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.AuditAnnotations[string(keysForAuditAnnotations[iNdEx])]
+			baseI := i
+			i -= len(v)
+			copy(dAtA[i:], v)
+			i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForAuditAnnotations[iNdEx])
+			copy(dAtA[i:], keysForAuditAnnotations[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAuditAnnotations[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x32
+		}
+	}
+	if m.PatchType != nil {
+		i -= len(*m.PatchType)
+		copy(dAtA[i:], *m.PatchType)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PatchType)))
+		i--
+		dAtA[i] = 0x2a
+	}
+	if m.Patch != nil {
+		i -= len(m.Patch)
+		copy(dAtA[i:], m.Patch)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(m.Patch)))
+		i--
+		dAtA[i] = 0x22
+	}
+	if m.Result != nil {
+		{
+			size, err := m.Result.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	i--
+	if m.Allowed {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x10
+	i -= len(m.UID)
+	copy(dAtA[i:], m.UID)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *AdmissionReview) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AdmissionReview) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AdmissionReview) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Response != nil {
+		{
+			size, err := m.Response.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.Request != nil {
+		{
+			size, err := m.Request.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+	offset -= sovGenerated(v)
+	base := offset
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return base
+}
+func (m *AdmissionRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.UID)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Kind.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Resource.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.SubResource)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Namespace)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Operation)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.UserInfo.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Object.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.OldObject.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.DryRun != nil {
+		n += 2
+	}
+	l = m.Options.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.RequestKind != nil {
+		l = m.RequestKind.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.RequestResource != nil {
+		l = m.RequestResource.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	l = len(m.RequestSubResource)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *AdmissionResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.UID)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 2
+	if m.Result != nil {
+		l = m.Result.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.Patch != nil {
+		l = len(m.Patch)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.PatchType != nil {
+		l = len(*m.PatchType)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if len(m.AuditAnnotations) > 0 {
+		for k, v := range m.AuditAnnotations {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	return n
+}
+
+func (m *AdmissionReview) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Request != nil {
+		l = m.Request.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.Response != nil {
+		l = m.Response.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func sovGenerated(x uint64) (n int) {
+	return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+	return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *AdmissionRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&AdmissionRequest{`,
+		`UID:` + fmt.Sprintf("%v", this.UID) + `,`,
+		`Kind:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Kind), "GroupVersionKind", "v1.GroupVersionKind", 1), `&`, ``, 1) + `,`,
+		`Resource:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resource), "GroupVersionResource", "v1.GroupVersionResource", 1), `&`, ``, 1) + `,`,
+		`SubResource:` + fmt.Sprintf("%v", this.SubResource) + `,`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
+		`Operation:` + fmt.Sprintf("%v", this.Operation) + `,`,
+		`UserInfo:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.UserInfo), "UserInfo", "v11.UserInfo", 1), `&`, ``, 1) + `,`,
+		`Object:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Object), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`,
+		`OldObject:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.OldObject), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`,
+		`DryRun:` + valueToStringGenerated(this.DryRun) + `,`,
+		`Options:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Options), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`,
+		`RequestKind:` + strings.Replace(fmt.Sprintf("%v", this.RequestKind), "GroupVersionKind", "v1.GroupVersionKind", 1) + `,`,
+		`RequestResource:` + strings.Replace(fmt.Sprintf("%v", this.RequestResource), "GroupVersionResource", "v1.GroupVersionResource", 1) + `,`,
+		`RequestSubResource:` + fmt.Sprintf("%v", this.RequestSubResource) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *AdmissionResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	keysForAuditAnnotations := make([]string, 0, len(this.AuditAnnotations))
+	for k := range this.AuditAnnotations {
+		keysForAuditAnnotations = append(keysForAuditAnnotations, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations)
+	mapStringForAuditAnnotations := "map[string]string{"
+	for _, k := range keysForAuditAnnotations {
+		mapStringForAuditAnnotations += fmt.Sprintf("%v: %v,", k, this.AuditAnnotations[k])
+	}
+	mapStringForAuditAnnotations += "}"
+	s := strings.Join([]string{`&AdmissionResponse{`,
+		`UID:` + fmt.Sprintf("%v", this.UID) + `,`,
+		`Allowed:` + fmt.Sprintf("%v", this.Allowed) + `,`,
+		`Result:` + strings.Replace(fmt.Sprintf("%v", this.Result), "Status", "v1.Status", 1) + `,`,
+		`Patch:` + valueToStringGenerated(this.Patch) + `,`,
+		`PatchType:` + valueToStringGenerated(this.PatchType) + `,`,
+		`AuditAnnotations:` + mapStringForAuditAnnotations + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *AdmissionReview) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&AdmissionReview{`,
+		`Request:` + strings.Replace(this.Request.String(), "AdmissionRequest", "AdmissionRequest", 1) + `,`,
+		`Response:` + strings.Replace(this.Response.String(), "AdmissionResponse", "AdmissionResponse", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func valueToStringGenerated(v interface{}) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("*%v", pv)
+}
+func (m *AdmissionRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AdmissionRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AdmissionRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Kind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SubResource", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.SubResource = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Namespace = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Operation", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Operation = Operation(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field UserInfo", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.UserInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field OldObject", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.OldObject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 11:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.DryRun = &b
+		case 12:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 13:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RequestKind", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.RequestKind == nil {
+				m.RequestKind = &v1.GroupVersionKind{}
+			}
+			if err := m.RequestKind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 14:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RequestResource", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.RequestResource == nil {
+				m.RequestResource = &v1.GroupVersionResource{}
+			}
+			if err := m.RequestResource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 15:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RequestSubResource", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.RequestSubResource = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AdmissionResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AdmissionResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AdmissionResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Allowed", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Allowed = bool(v != 0)
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Result == nil {
+				m.Result = &v1.Status{}
+			}
+			if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Patch", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Patch = append(m.Patch[:0], dAtA[iNdEx:postIndex]...)
+			if m.Patch == nil {
+				m.Patch = []byte{}
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PatchType", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := PatchType(dAtA[iNdEx:postIndex])
+			m.PatchType = &s
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuditAnnotations", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuditAnnotations == nil {
+				m.AuditAnnotations = make(map[string]string)
+			}
+			var mapkey string
+			var mapvalue string
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var stringLenmapvalue uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapvalue |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapvalue := int(stringLenmapvalue)
+					if intStringLenmapvalue < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapvalue > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+					iNdEx = postStringIndexmapvalue
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if skippy < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.AuditAnnotations[mapkey] = mapvalue
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AdmissionReview) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
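+	// Each pass of this loop consumes one key/value pair from the wire: the key
+	// varint packs the field number in its upper bits (wire >> 3) and the wire
+	// type in its low three bits (wire & 0x7).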
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AdmissionReview: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AdmissionReview: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Request == nil {
+				m.Request = &AdmissionRequest{}
+			}
+			if err := m.Request.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Response", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Response == nil {
+				m.Response = &AdmissionResponse{}
+			}
+			if err := m.Response.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
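+// skipGenerated returns the number of bytes occupied by the next field in dAtA,
+// including its tag and any nested (deprecated) group markers, so Unmarshal can
+// step over fields it does not recognize.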
+func skipGenerated(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	depth := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+		case 1:
+			iNdEx += 8
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthGenerated
+			}
+			iNdEx += length
+		case 3:
+			depth++
+		case 4:
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
+		case 5:
+			iNdEx += 4
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
+	}
+	return 0, io.ErrUnexpectedEOF
+}
+
+var (
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/vendor/k8s.io/api/admission/v1/generated.proto b/vendor/k8s.io/api/admission/v1/generated.proto
new file mode 100644
index 00000000..8d960a17
--- /dev/null
+++ b/vendor/k8s.io/api/admission/v1/generated.proto
@@ -0,0 +1,160 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.api.admission.v1;
+
+import "k8s.io/api/authentication/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1";
+
+// AdmissionRequest describes the admission.Attributes for the admission request.
+message AdmissionRequest {
+  // UID is an identifier for the individual request/response. It allows us to distinguish instances of requests which are
+  // otherwise identical (parallel requests, requests when earlier requests did not modify etc)
+  // The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request.
+  // It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging.
+  optional string uid = 1;
+
+  // Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale)
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind kind = 2;
+
+  // Resource is the fully-qualified resource being requested (for example, v1.pods)
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource resource = 3;
+
+  // SubResource is the subresource being requested, if any (for example, "status" or "scale")
+  // +optional
+  optional string subResource = 4;
+
+  // RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale).
+  // If this is specified and differs from the value in "kind", an equivalent match and conversion was performed.
+  //
+  // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of
+  // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`,
+  // an API request to apps/v1beta1 deployments would be converted and sent to the webhook
+  // with `kind: {group:"apps", version:"v1", kind:"Deployment"}` (matching the rule the webhook registered for),
+  // and `requestKind: {group:"apps", version:"v1beta1", kind:"Deployment"}` (indicating the kind of the original API request).
+  //
+  // See documentation for the "matchPolicy" field in the webhook configuration type for more details.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind requestKind = 13;
+
+  // RequestResource is the fully-qualified resource of the original API request (for example, v1.pods).
+  // If this is specified and differs from the value in "resource", an equivalent match and conversion was performed.
+  //
+  // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of
+  // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`,
+  // an API request to apps/v1beta1 deployments would be converted and sent to the webhook
+  // with `resource: {group:"apps", version:"v1", resource:"deployments"}` (matching the resource the webhook registered for),
+  // and `requestResource: {group:"apps", version:"v1beta1", resource:"deployments"}` (indicating the resource of the original API request).
+  //
+  // See documentation for the "matchPolicy" field in the webhook configuration type.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource requestResource = 14;
+
+  // RequestSubResource is the name of the subresource of the original API request, if any (for example, "status" or "scale")
+  // If this is specified and differs from the value in "subResource", an equivalent match and conversion was performed.
+  // See documentation for the "matchPolicy" field in the webhook configuration type.
+  // +optional
+  optional string requestSubResource = 15;
+
+  // Name is the name of the object as presented in the request.  On a CREATE operation, the client may omit name and
+  // rely on the server to generate the name.  If that is the case, this field will contain an empty string.
+  // +optional
+  optional string name = 5;
+
+  // Namespace is the namespace associated with the request (if any).
+  // +optional
+  optional string namespace = 6;
+
+  // Operation is the operation being performed. This may be different than the operation
+  // requested. e.g. a patch can result in either a CREATE or UPDATE Operation.
+  optional string operation = 7;
+
+  // UserInfo is information about the requesting user
+  optional k8s.io.api.authentication.v1.UserInfo userInfo = 8;
+
+  // Object is the object from the incoming request.
+  // +optional
+  optional k8s.io.apimachinery.pkg.runtime.RawExtension object = 9;
+
+  // OldObject is the existing object. Only populated for DELETE and UPDATE requests.
+  // +optional
+  optional k8s.io.apimachinery.pkg.runtime.RawExtension oldObject = 10;
+
+  // DryRun indicates that modifications will definitely not be persisted for this request.
+  // Defaults to false.
+  // +optional
+  optional bool dryRun = 11;
+
+  // Options is the operation option structure of the operation being performed.
+  // e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be
+  // different than the options the caller provided. e.g. for a patch request the performed
+  // Operation might be a CREATE, in which case the Options will be a
+  // `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`.
+  // +optional
+  optional k8s.io.apimachinery.pkg.runtime.RawExtension options = 12;
+}
+
+// AdmissionResponse describes an admission response.
+message AdmissionResponse {
+  // UID is an identifier for the individual request/response.
+  // This must be copied over from the corresponding AdmissionRequest.
+  optional string uid = 1;
+
+  // Allowed indicates whether or not the admission request was permitted.
+  optional bool allowed = 2;
+
+  // Result contains extra details into why an admission request was denied.
+  // This field IS NOT consulted in any way if "Allowed" is "true".
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Status status = 3;
+
+  // The patch body. Currently we only support "JSONPatch" which implements RFC 6902.
+  // +optional
+  optional bytes patch = 4;
+
+  // The type of Patch. Currently we only allow "JSONPatch".
+  // +optional
+  optional string patchType = 5;
+
+  // AuditAnnotations is an unstructured key value map set by the remote admission controller (e.g. error=image-blacklisted).
+  // The MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controllers will prefix the keys with the
+  // admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by
+  // the admission webhook to add additional context to the audit log for this request.
+  // +optional
+  map<string, string> auditAnnotations = 6;
+}
+
+// AdmissionReview describes an admission review request/response.
+message AdmissionReview {
+  // Request describes the attributes for the admission request.
+  // +optional
+  optional AdmissionRequest request = 1;
+
+  // Response describes the attributes for the admission response.
+  // +optional
+  optional AdmissionResponse response = 2;
+}
+
diff --git a/vendor/k8s.io/api/admission/v1/register.go b/vendor/k8s.io/api/admission/v1/register.go
new file mode 100644
index 00000000..b548509a
--- /dev/null
+++ b/vendor/k8s.io/api/admission/v1/register.go
@@ -0,0 +1,51 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name for this API.
+const GroupName = "admission.k8s.io"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+	SchemeBuilder      = runtime.NewSchemeBuilder(addKnownTypes)
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&AdmissionReview{},
+	)
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
diff --git a/vendor/k8s.io/api/admission/v1/types.go b/vendor/k8s.io/api/admission/v1/types.go
new file mode 100644
index 00000000..a40cb0d5
--- /dev/null
+++ b/vendor/k8s.io/api/admission/v1/types.go
@@ -0,0 +1,162 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	authenticationv1 "k8s.io/api/authentication/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// AdmissionReview describes an admission review request/response.
+type AdmissionReview struct {
+	metav1.TypeMeta `json:",inline"`
+	// Request describes the attributes for the admission request.
+	// +optional
+	Request *AdmissionRequest `json:"request,omitempty" protobuf:"bytes,1,opt,name=request"`
+	// Response describes the attributes for the admission response.
+	// +optional
+	Response *AdmissionResponse `json:"response,omitempty" protobuf:"bytes,2,opt,name=response"`
+}
+
+// AdmissionRequest describes the admission.Attributes for the admission request.
+type AdmissionRequest struct {
+	// UID is an identifier for the individual request/response. It allows us to distinguish instances of requests which are
+	// otherwise identical (parallel requests, requests when earlier requests did not modify etc)
+	// The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request.
+	// It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging.
+	UID types.UID `json:"uid" protobuf:"bytes,1,opt,name=uid"`
+	// Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale)
+	Kind metav1.GroupVersionKind `json:"kind" protobuf:"bytes,2,opt,name=kind"`
+	// Resource is the fully-qualified resource being requested (for example, v1.pods)
+	Resource metav1.GroupVersionResource `json:"resource" protobuf:"bytes,3,opt,name=resource"`
+	// SubResource is the subresource being requested, if any (for example, "status" or "scale")
+	// +optional
+	SubResource string `json:"subResource,omitempty" protobuf:"bytes,4,opt,name=subResource"`
+
+	// RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale).
+	// If this is specified and differs from the value in "kind", an equivalent match and conversion was performed.
+	//
+	// For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of
+	// `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`,
+	// an API request to apps/v1beta1 deployments would be converted and sent to the webhook
+	// with `kind: {group:"apps", version:"v1", kind:"Deployment"}` (matching the rule the webhook registered for),
+	// and `requestKind: {group:"apps", version:"v1beta1", kind:"Deployment"}` (indicating the kind of the original API request).
+	//
+	// See documentation for the "matchPolicy" field in the webhook configuration type for more details.
+	// +optional
+	RequestKind *metav1.GroupVersionKind `json:"requestKind,omitempty" protobuf:"bytes,13,opt,name=requestKind"`
+	// RequestResource is the fully-qualified resource of the original API request (for example, v1.pods).
+	// If this is specified and differs from the value in "resource", an equivalent match and conversion was performed.
+	//
+	// For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of
+	// `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`,
+	// an API request to apps/v1beta1 deployments would be converted and sent to the webhook
+	// with `resource: {group:"apps", version:"v1", resource:"deployments"}` (matching the resource the webhook registered for),
+	// and `requestResource: {group:"apps", version:"v1beta1", resource:"deployments"}` (indicating the resource of the original API request).
+	//
+	// See documentation for the "matchPolicy" field in the webhook configuration type.
+	// +optional
+	RequestResource *metav1.GroupVersionResource `json:"requestResource,omitempty" protobuf:"bytes,14,opt,name=requestResource"`
+	// RequestSubResource is the name of the subresource of the original API request, if any (for example, "status" or "scale")
+	// If this is specified and differs from the value in "subResource", an equivalent match and conversion was performed.
+	// See documentation for the "matchPolicy" field in the webhook configuration type.
+	// +optional
+	RequestSubResource string `json:"requestSubResource,omitempty" protobuf:"bytes,15,opt,name=requestSubResource"`
+
+	// Name is the name of the object as presented in the request.  On a CREATE operation, the client may omit name and
+	// rely on the server to generate the name.  If that is the case, this field will contain an empty string.
+	// +optional
+	Name string `json:"name,omitempty" protobuf:"bytes,5,opt,name=name"`
+	// Namespace is the namespace associated with the request (if any).
+	// +optional
+	Namespace string `json:"namespace,omitempty" protobuf:"bytes,6,opt,name=namespace"`
+	// Operation is the operation being performed. This may be different than the operation
+	// requested. e.g. a patch can result in either a CREATE or UPDATE Operation.
+	Operation Operation `json:"operation" protobuf:"bytes,7,opt,name=operation"`
+	// UserInfo is information about the requesting user
+	UserInfo authenticationv1.UserInfo `json:"userInfo" protobuf:"bytes,8,opt,name=userInfo"`
+	// Object is the object from the incoming request.
+	// +optional
+	Object runtime.RawExtension `json:"object,omitempty" protobuf:"bytes,9,opt,name=object"`
+	// OldObject is the existing object. Only populated for DELETE and UPDATE requests.
+	// +optional
+	OldObject runtime.RawExtension `json:"oldObject,omitempty" protobuf:"bytes,10,opt,name=oldObject"`
+	// DryRun indicates that modifications will definitely not be persisted for this request.
+	// Defaults to false.
+	// +optional
+	DryRun *bool `json:"dryRun,omitempty" protobuf:"varint,11,opt,name=dryRun"`
+	// Options is the operation option structure of the operation being performed.
+	// e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be
+	// different than the options the caller provided. e.g. for a patch request the performed
+	// Operation might be a CREATE, in which case the Options will be a
+	// `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`.
+	// +optional
+	Options runtime.RawExtension `json:"options,omitempty" protobuf:"bytes,12,opt,name=options"`
+}
+
+// AdmissionResponse describes an admission response.
+type AdmissionResponse struct {
+	// UID is an identifier for the individual request/response.
+	// This must be copied over from the corresponding AdmissionRequest.
+	UID types.UID `json:"uid" protobuf:"bytes,1,opt,name=uid"`
+
+	// Allowed indicates whether or not the admission request was permitted.
+	Allowed bool `json:"allowed" protobuf:"varint,2,opt,name=allowed"`
+
+	// Result contains extra details into why an admission request was denied.
+	// This field IS NOT consulted in any way if "Allowed" is "true".
+	// +optional
+	Result *metav1.Status `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+
+	// The patch body. Currently we only support "JSONPatch" which implements RFC 6902.
+	// +optional
+	Patch []byte `json:"patch,omitempty" protobuf:"bytes,4,opt,name=patch"`
+
+	// The type of Patch. Currently we only allow "JSONPatch".
+	// +optional
+	PatchType *PatchType `json:"patchType,omitempty" protobuf:"bytes,5,opt,name=patchType"`
+
+	// AuditAnnotations is an unstructured key value map set by the remote admission controller (e.g. error=image-blacklisted).
+	// The MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controllers will prefix the keys with the
+	// admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by
+	// the admission webhook to add additional context to the audit log for this request.
+	// +optional
+	AuditAnnotations map[string]string `json:"auditAnnotations,omitempty" protobuf:"bytes,6,opt,name=auditAnnotations"`
+}
+
+// PatchType is the type of patch being used to represent the mutated object
+type PatchType string
+
+// PatchType constants.
+const (
+	PatchTypeJSONPatch PatchType = "JSONPatch"
+)
+
+// Operation is the type of resource operation being checked for admission control
+type Operation string
+
+// Operation constants
+const (
+	Create  Operation = "CREATE"
+	Update  Operation = "UPDATE"
+	Delete  Operation = "DELETE"
+	Connect Operation = "CONNECT"
+)
diff --git a/vendor/k8s.io/api/admission/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admission/v1/types_swagger_doc_generated.go
new file mode 100644
index 00000000..62351b16
--- /dev/null
+++ b/vendor/k8s.io/api/admission/v1/types_swagger_doc_generated.go
@@ -0,0 +1,77 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_AdmissionRequest = map[string]string{
+	"":                   "AdmissionRequest describes the admission.Attributes for the admission request.",
+	"uid":                "UID is an identifier for the individual request/response. It allows us to distinguish instances of requests which are otherwise identical (parallel requests, requests when earlier requests did not modify etc) The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging.",
+	"kind":               "Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale)",
+	"resource":           "Resource is the fully-qualified resource being requested (for example, v1.pods)",
+	"subResource":        "SubResource is the subresource being requested, if any (for example, \"status\" or \"scale\")",
+	"requestKind":        "RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale). If this is specified and differs from the value in \"kind\", an equivalent match and conversion was performed.\n\nFor example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]` and `matchPolicy: Equivalent`, an API request to apps/v1beta1 deployments would be converted and sent to the webhook with `kind: {group:\"apps\", version:\"v1\", kind:\"Deployment\"}` (matching the rule the webhook registered for), and `requestKind: {group:\"apps\", version:\"v1beta1\", kind:\"Deployment\"}` (indicating the kind of the original API request).\n\nSee documentation for the \"matchPolicy\" field in the webhook configuration type for more details.",
+	"requestResource":    "RequestResource is the fully-qualified resource of the original API request (for example, v1.pods). If this is specified and differs from the value in \"resource\", an equivalent match and conversion was performed.\n\nFor example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]` and `matchPolicy: Equivalent`, an API request to apps/v1beta1 deployments would be converted and sent to the webhook with `resource: {group:\"apps\", version:\"v1\", resource:\"deployments\"}` (matching the resource the webhook registered for), and `requestResource: {group:\"apps\", version:\"v1beta1\", resource:\"deployments\"}` (indicating the resource of the original API request).\n\nSee documentation for the \"matchPolicy\" field in the webhook configuration type.",
+	"requestSubResource": "RequestSubResource is the name of the subresource of the original API request, if any (for example, \"status\" or \"scale\") If this is specified and differs from the value in \"subResource\", an equivalent match and conversion was performed. See documentation for the \"matchPolicy\" field in the webhook configuration type.",
+	"name":               "Name is the name of the object as presented in the request.  On a CREATE operation, the client may omit name and rely on the server to generate the name.  If that is the case, this field will contain an empty string.",
+	"namespace":          "Namespace is the namespace associated with the request (if any).",
+	"operation":          "Operation is the operation being performed. This may be different than the operation requested. e.g. a patch can result in either a CREATE or UPDATE Operation.",
+	"userInfo":           "UserInfo is information about the requesting user",
+	"object":             "Object is the object from the incoming request.",
+	"oldObject":          "OldObject is the existing object. Only populated for DELETE and UPDATE requests.",
+	"dryRun":             "DryRun indicates that modifications will definitely not be persisted for this request. Defaults to false.",
+	"options":            "Options is the operation option structure of the operation being performed. e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be different than the options the caller provided. e.g. for a patch request the performed Operation might be a CREATE, in which case the Options will a `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`.",
+}
+
+func (AdmissionRequest) SwaggerDoc() map[string]string {
+	return map_AdmissionRequest
+}
+
+var map_AdmissionResponse = map[string]string{
+	"":                 "AdmissionResponse describes an admission response.",
+	"uid":              "UID is an identifier for the individual request/response. This must be copied over from the corresponding AdmissionRequest.",
+	"allowed":          "Allowed indicates whether or not the admission request was permitted.",
+	"status":           "Result contains extra details into why an admission request was denied. This field IS NOT consulted in any way if \"Allowed\" is \"true\".",
+	"patch":            "The patch body. Currently we only support \"JSONPatch\" which implements RFC 6902.",
+	"patchType":        "The type of Patch. Currently we only allow \"JSONPatch\".",
+	"auditAnnotations": "AuditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted). MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controller will prefix the keys with admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by the admission webhook to add additional context to the audit log for this request.",
+}
+
+func (AdmissionResponse) SwaggerDoc() map[string]string {
+	return map_AdmissionResponse
+}
+
+var map_AdmissionReview = map[string]string{
+	"":         "AdmissionReview describes an admission review request/response.",
+	"request":  "Request describes the attributes for the admission request.",
+	"response": "Response describes the attributes for the admission response.",
+}
+
+func (AdmissionReview) SwaggerDoc() map[string]string {
+	return map_AdmissionReview
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/k8s.io/api/admission/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admission/v1/zz_generated.deepcopy.go
new file mode 100644
index 00000000..42954ca4
--- /dev/null
+++ b/vendor/k8s.io/api/admission/v1/zz_generated.deepcopy.go
@@ -0,0 +1,136 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AdmissionRequest) DeepCopyInto(out *AdmissionRequest) {
+	*out = *in
+	out.Kind = in.Kind
+	out.Resource = in.Resource
+	if in.RequestKind != nil {
+		in, out := &in.RequestKind, &out.RequestKind
+		*out = new(metav1.GroupVersionKind)
+		**out = **in
+	}
+	if in.RequestResource != nil {
+		in, out := &in.RequestResource, &out.RequestResource
+		*out = new(metav1.GroupVersionResource)
+		**out = **in
+	}
+	in.UserInfo.DeepCopyInto(&out.UserInfo)
+	in.Object.DeepCopyInto(&out.Object)
+	in.OldObject.DeepCopyInto(&out.OldObject)
+	if in.DryRun != nil {
+		in, out := &in.DryRun, &out.DryRun
+		*out = new(bool)
+		**out = **in
+	}
+	in.Options.DeepCopyInto(&out.Options)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionRequest.
+func (in *AdmissionRequest) DeepCopy() *AdmissionRequest {
+	if in == nil {
+		return nil
+	}
+	out := new(AdmissionRequest)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AdmissionResponse) DeepCopyInto(out *AdmissionResponse) {
+	*out = *in
+	if in.Result != nil {
+		in, out := &in.Result, &out.Result
+		*out = new(metav1.Status)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Patch != nil {
+		in, out := &in.Patch, &out.Patch
+		*out = make([]byte, len(*in))
+		copy(*out, *in)
+	}
+	if in.PatchType != nil {
+		in, out := &in.PatchType, &out.PatchType
+		*out = new(PatchType)
+		**out = **in
+	}
+	if in.AuditAnnotations != nil {
+		in, out := &in.AuditAnnotations, &out.AuditAnnotations
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionResponse.
+func (in *AdmissionResponse) DeepCopy() *AdmissionResponse {
+	if in == nil {
+		return nil
+	}
+	out := new(AdmissionResponse)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AdmissionReview) DeepCopyInto(out *AdmissionReview) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.Request != nil {
+		in, out := &in.Request, &out.Request
+		*out = new(AdmissionRequest)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Response != nil {
+		in, out := &in.Response, &out.Response
+		*out = new(AdmissionResponse)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionReview.
+func (in *AdmissionReview) DeepCopy() *AdmissionReview {
+	if in == nil {
+		return nil
+	}
+	out := new(AdmissionReview)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AdmissionReview) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
diff --git a/vendor/k8s.io/api/admission/v1beta1/doc.go b/vendor/k8s.io/api/admission/v1beta1/doc.go
new file mode 100644
index 00000000..92f7c19d
--- /dev/null
+++ b/vendor/k8s.io/api/admission/v1beta1/doc.go
@@ -0,0 +1,23 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:protobuf-gen=package
+// +k8s:openapi-gen=false
+
+// +groupName=admission.k8s.io
+
+package v1beta1 // import "k8s.io/api/admission/v1beta1"
diff --git a/vendor/k8s.io/api/admission/v1beta1/generated.pb.go b/vendor/k8s.io/api/admission/v1beta1/generated.pb.go
new file mode 100644
index 00000000..d694203f
--- /dev/null
+++ b/vendor/k8s.io/api/admission/v1beta1/generated.pb.go
@@ -0,0 +1,1743 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: k8s.io/kubernetes/vendor/k8s.io/api/admission/v1beta1/generated.proto
+
+package v1beta1
+
+import (
+	fmt "fmt"
+
+	io "io"
+
+	proto "github.com/gogo/protobuf/proto"
+	github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	math "math"
+	math_bits "math/bits"
+	reflect "reflect"
+	strings "strings"
+
+	k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+func (m *AdmissionRequest) Reset()      { *m = AdmissionRequest{} }
+func (*AdmissionRequest) ProtoMessage() {}
+func (*AdmissionRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_b87c2352de86eab9, []int{0}
+}
+func (m *AdmissionRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AdmissionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *AdmissionRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AdmissionRequest.Merge(m, src)
+}
+func (m *AdmissionRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AdmissionRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AdmissionRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AdmissionRequest proto.InternalMessageInfo
+
+func (m *AdmissionResponse) Reset()      { *m = AdmissionResponse{} }
+func (*AdmissionResponse) ProtoMessage() {}
+func (*AdmissionResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_b87c2352de86eab9, []int{1}
+}
+func (m *AdmissionResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AdmissionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *AdmissionResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AdmissionResponse.Merge(m, src)
+}
+func (m *AdmissionResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AdmissionResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AdmissionResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AdmissionResponse proto.InternalMessageInfo
+
+func (m *AdmissionReview) Reset()      { *m = AdmissionReview{} }
+func (*AdmissionReview) ProtoMessage() {}
+func (*AdmissionReview) Descriptor() ([]byte, []int) {
+	return fileDescriptor_b87c2352de86eab9, []int{2}
+}
+func (m *AdmissionReview) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AdmissionReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *AdmissionReview) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AdmissionReview.Merge(m, src)
+}
+func (m *AdmissionReview) XXX_Size() int {
+	return m.Size()
+}
+func (m *AdmissionReview) XXX_DiscardUnknown() {
+	xxx_messageInfo_AdmissionReview.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AdmissionReview proto.InternalMessageInfo
+
+func init() {
+	proto.RegisterType((*AdmissionRequest)(nil), "k8s.io.api.admission.v1beta1.AdmissionRequest")
+	proto.RegisterType((*AdmissionResponse)(nil), "k8s.io.api.admission.v1beta1.AdmissionResponse")
+	proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.admission.v1beta1.AdmissionResponse.AuditAnnotationsEntry")
+	proto.RegisterType((*AdmissionReview)(nil), "k8s.io.api.admission.v1beta1.AdmissionReview")
+}
+
+func init() {
+	proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admission/v1beta1/generated.proto", fileDescriptor_b87c2352de86eab9)
+}
+
+var fileDescriptor_b87c2352de86eab9 = []byte{
+	// 902 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0x4f, 0x6f, 0x1b, 0x45,
+	0x14, 0xf7, 0xd6, 0x8e, 0xed, 0x1d, 0x87, 0xda, 0x9d, 0x82, 0xb4, 0xb2, 0xd0, 0xda, 0xe4, 0x80,
+	0x82, 0xd4, 0xcc, 0x92, 0x08, 0xaa, 0xa8, 0xe2, 0x92, 0x25, 0x11, 0x0a, 0x48, 0x4d, 0x34, 0xad,
+	0x51, 0xe1, 0x80, 0x34, 0xf6, 0x4e, 0xed, 0xc5, 0xf6, 0xcc, 0xb2, 0x33, 0xeb, 0xe0, 0x1b, 0xe2,
+	0xca, 0x85, 0x6f, 0xc0, 0x87, 0xe1, 0x92, 0x63, 0x8f, 0x3d, 0x59, 0xc4, 0x7c, 0x8b, 0x9c, 0xd0,
+	0xcc, 0xce, 0x7a, 0xb7, 0x76, 0x02, 0xfd, 0xc3, 0xc9, 0xf3, 0xfe, 0xfc, 0x7e, 0xef, 0xf9, 0xf7,
+	0x76, 0xde, 0x80, 0x93, 0xf1, 0xa1, 0x40, 0x21, 0xf7, 0xc6, 0x49, 0x9f, 0xc6, 0x8c, 0x4a, 0x2a,
+	0xbc, 0x19, 0x65, 0x01, 0x8f, 0x3d, 0x13, 0x20, 0x51, 0xe8, 0x91, 0x60, 0x1a, 0x0a, 0x11, 0x72,
+	0xe6, 0xcd, 0xf6, 0xfb, 0x54, 0x92, 0x7d, 0x6f, 0x48, 0x19, 0x8d, 0x89, 0xa4, 0x01, 0x8a, 0x62,
+	0x2e, 0x39, 0xfc, 0x30, 0xcd, 0x46, 0x24, 0x0a, 0xd1, 0x2a, 0x1b, 0x99, 0xec, 0xf6, 0xde, 0x30,
+	0x94, 0xa3, 0xa4, 0x8f, 0x06, 0x7c, 0xea, 0x0d, 0xf9, 0x90, 0x7b, 0x1a, 0xd4, 0x4f, 0x9e, 0x6b,
+	0x4b, 0x1b, 0xfa, 0x94, 0x92, 0xb5, 0x1f, 0x14, 0x4b, 0x27, 0x72, 0x44, 0x99, 0x0c, 0x07, 0x44,
+	0xa6, 0xf5, 0xd7, 0x4b, 0xb7, 0x3f, 0xcb, 0xb3, 0xa7, 0x64, 0x30, 0x0a, 0x19, 0x8d, 0xe7, 0x5e,
+	0x34, 0x1e, 0x2a, 0x87, 0xf0, 0xa6, 0x54, 0x92, 0x9b, 0x50, 0xde, 0x6d, 0xa8, 0x38, 0x61, 0x32,
+	0x9c, 0xd2, 0x0d, 0xc0, 0xc3, 0xff, 0x02, 0x88, 0xc1, 0x88, 0x4e, 0xc9, 0x3a, 0x6e, 0xe7, 0x0f,
+	0x1b, 0xb4, 0x8e, 0x32, 0x45, 0x30, 0xfd, 0x29, 0xa1, 0x42, 0x42, 0x1f, 0x94, 0x93, 0x30, 0x70,
+	0xac, 0xae, 0xb5, 0x6b, 0xfb, 0x9f, 0x5e, 0x2e, 0x3a, 0xa5, 0xe5, 0xa2, 0x53, 0xee, 0x9d, 0x1e,
+	0x5f, 0x2f, 0x3a, 0x1f, 0xdd, 0x56, 0x48, 0xce, 0x23, 0x2a, 0x50, 0xef, 0xf4, 0x18, 0x2b, 0x30,
+	0x7c, 0x06, 0x2a, 0xe3, 0x90, 0x05, 0xce, 0x9d, 0xae, 0xb5, 0xdb, 0x38, 0x78, 0x88, 0xf2, 0x09,
+	0xac, 0x60, 0x28, 0x1a, 0x0f, 0x95, 0x43, 0x20, 0x25, 0x03, 0x9a, 0xed, 0xa3, 0xaf, 0x62, 0x9e,
+	0x44, 0xdf, 0xd2, 0x58, 0x35, 0xf3, 0x4d, 0xc8, 0x02, 0x7f, 0xdb, 0x14, 0xaf, 0x28, 0x0b, 0x6b,
+	0x46, 0x38, 0x02, 0xf5, 0x98, 0x0a, 0x9e, 0xc4, 0x03, 0xea, 0x94, 0x35, 0xfb, 0xa3, 0x37, 0x67,
+	0xc7, 0x86, 0xc1, 0x6f, 0x99, 0x0a, 0xf5, 0xcc, 0x83, 0x57, 0xec, 0xf0, 0x73, 0xd0, 0x10, 0x49,
+	0x3f, 0x0b, 0x38, 0x15, 0xad, 0xc7, 0x7d, 0x03, 0x68, 0x3c, 0xc9, 0x43, 0xb8, 0x98, 0x07, 0x43,
+	0xd0, 0x88, 0x53, 0x25, 0x55, 0xd7, 0xce, 0x7b, 0xef, 0xa4, 0x40, 0x53, 0x95, 0xc2, 0x39, 0x1d,
+	0x2e, 0x72, 0xc3, 0x39, 0x68, 0x1a, 0x73, 0xd5, 0xe5, 0xdd, 0x77, 0x96, 0xe4, 0xfe, 0x72, 0xd1,
+	0x69, 0xe2, 0x57, 0x69, 0xf1, 0x7a, 0x1d, 0xf8, 0x35, 0x80, 0xc6, 0x55, 0x10, 0xc2, 0x69, 0x6a,
+	0x8d, 0xda, 0x46, 0x23, 0x88, 0x37, 0x32, 0xf0, 0x0d, 0x28, 0xd8, 0x05, 0x15, 0x46, 0xa6, 0xd4,
+	0xd9, 0xd2, 0xe8, 0xd5, 0xd0, 0x1f, 0x93, 0x29, 0xc5, 0x3a, 0x02, 0x3d, 0x60, 0xab, 0x5f, 0x11,
+	0x91, 0x01, 0x75, 0xaa, 0x3a, 0xed, 0x9e, 0x49, 0xb3, 0x1f, 0x67, 0x01, 0x9c, 0xe7, 0xc0, 0x2f,
+	0x80, 0xcd, 0x23, 0xf5, 0xa9, 0x87, 0x9c, 0x39, 0x35, 0x0d, 0x70, 0x33, 0xc0, 0x59, 0x16, 0xb8,
+	0x2e, 0x1a, 0x38, 0x07, 0xc0, 0xa7, 0xa0, 0x9e, 0x08, 0x1a, 0x9f, 0xb2, 0xe7, 0xdc, 0xa9, 0x6b,
+	0x41, 0x3f, 0x46, 0xc5, 0x1d, 0xf2, 0xca, 0xb5, 0x57, 0x42, 0xf6, 0x4c, 0x76, 0xfe, 0x3d, 0x65,
+	0x1e, 0xbc, 0x62, 0x82, 0x3d, 0x50, 0xe5, 0xfd, 0x1f, 0xe9, 0x40, 0x3a, 0xb6, 0xe6, 0xdc, 0xbb,
+	0x75, 0x48, 0xe6, 0xd6, 0x22, 0x4c, 0x2e, 0x4e, 0x7e, 0x96, 0x94, 0xa9, 0xf9, 0xf8, 0x77, 0x0d,
+	0x75, 0xf5, 0x4c, 0x93, 0x60, 0x43, 0x06, 0x7f, 0x00, 0x36, 0x9f, 0x04, 0xa9, 0xd3, 0x01, 0x6f,
+	0xc3, 0xbc, 0x92, 0xf2, 0x2c, 0xe3, 0xc1, 0x39, 0x25, 0xdc, 0x01, 0xd5, 0x20, 0x9e, 0xe3, 0x84,
+	0x39, 0x8d, 0xae, 0xb5, 0x5b, 0xf7, 0x81, 0xea, 0xe1, 0x58, 0x7b, 0xb0, 0x89, 0xc0, 0x67, 0xa0,
+	0xc6, 0x23, 0x25, 0x86, 0x70, 0xb6, 0xdf, 0xa6, 0x83, 0xa6, 0xe9, 0xa0, 0x76, 0x96, 0xb2, 0xe0,
+	0x8c, 0x6e, 0xe7, 0xd7, 0x0a, 0xb8, 0x57, 0xd8, 0x50, 0x22, 0xe2, 0x4c, 0xd0, 0xff, 0x65, 0x45,
+	0x7d, 0x02, 0x6a, 0x64, 0x32, 0xe1, 0x17, 0x34, 0xdd, 0x52, 0xf5, 0xbc, 0x89, 0xa3, 0xd4, 0x8d,
+	0xb3, 0x38, 0x3c, 0x07, 0x55, 0x21, 0x89, 0x4c, 0x84, 0xd9, 0x38, 0x0f, 0x5e, 0xef, 0x7a, 0x3d,
+	0xd1, 0x98, 0x54, 0x30, 0x4c, 0x45, 0x32, 0x91, 0xd8, 0xf0, 0xc0, 0x0e, 0xd8, 0x8a, 0x88, 0x1c,
+	0x8c, 0xf4, 0x56, 0xd9, 0xf6, 0xed, 0xe5, 0xa2, 0xb3, 0x75, 0xae, 0x1c, 0x38, 0xf5, 0xc3, 0x43,
+	0x60, 0xeb, 0xc3, 0xd3, 0x79, 0x94, 0x5d, 0x8c, 0xb6, 0x1a, 0xd1, 0x79, 0xe6, 0xbc, 0x2e, 0x1a,
+	0x38, 0x4f, 0x86, 0xbf, 0x59, 0xa0, 0x45, 0x92, 0x20, 0x94, 0x47, 0x8c, 0x71, 0x49, 0xd2, 0xa9,
+	0x54, 0xbb, 0xe5, 0xdd, 0xc6, 0xc1, 0x09, 0xfa, 0xb7, 0x97, 0x10, 0x6d, 0xe8, 0x8c, 0x8e, 0xd6,
+	0x78, 0x4e, 0x98, 0x8c, 0xe7, 0xbe, 0x63, 0x84, 0x6a, 0xad, 0x87, 0xf1, 0x46, 0xe1, 0xf6, 0x97,
+	0xe0, 0x83, 0x1b, 0x49, 0x60, 0x0b, 0x94, 0xc7, 0x74, 0x9e, 0x8e, 0x10, 0xab, 0x23, 0x7c, 0x1f,
+	0x6c, 0xcd, 0xc8, 0x24, 0xa1, 0x7a, 0x1c, 0x36, 0x4e, 0x8d, 0x47, 0x77, 0x0e, 0xad, 0x9d, 0x3f,
+	0x2d, 0xd0, 0x2c, 0x34, 0x37, 0x0b, 0xe9, 0x05, 0xec, 0x81, 0x9a, 0x59, 0x25, 0x9a, 0xa3, 0x71,
+	0x80, 0x5e, 0xfb, 0xcf, 0x69, 0x94, 0xdf, 0x50, 0xa3, 0xce, 0xf6, 0x5c, 0xc6, 0x05, 0xbf, 0xd3,
+	0xcf, 0x8b, 0xfe, 0xf7, 0xe6, 0xf1, 0xf2, 0xde, 0x50, 0x34, 0x7f, 0xdb, 0xbc, 0x27, 0xda, 0xc2,
+	0x2b, 0x3a, 0x7f, 0xef, 0xf2, 0xca, 0x2d, 0xbd, 0xb8, 0x72, 0x4b, 0x2f, 0xaf, 0xdc, 0xd2, 0x2f,
+	0x4b, 0xd7, 0xba, 0x5c, 0xba, 0xd6, 0x8b, 0xa5, 0x6b, 0xbd, 0x5c, 0xba, 0xd6, 0x5f, 0x4b, 0xd7,
+	0xfa, 0xfd, 0x6f, 0xb7, 0xf4, 0x7d, 0xcd, 0x10, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0x8b, 0xd1,
+	0x27, 0x74, 0xfd, 0x08, 0x00, 0x00,
+}
+
+func (m *AdmissionRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AdmissionRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AdmissionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.RequestSubResource)
+	copy(dAtA[i:], m.RequestSubResource)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequestSubResource)))
+	i--
+	dAtA[i] = 0x7a
+	if m.RequestResource != nil {
+		{
+			size, err := m.RequestResource.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x72
+	}
+	if m.RequestKind != nil {
+		{
+			size, err := m.RequestKind.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x6a
+	}
+	{
+		size, err := m.Options.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x62
+	if m.DryRun != nil {
+		i--
+		if *m.DryRun {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x58
+	}
+	{
+		size, err := m.OldObject.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x52
+	{
+		size, err := m.Object.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x4a
+	{
+		size, err := m.UserInfo.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x42
+	i -= len(m.Operation)
+	copy(dAtA[i:], m.Operation)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operation)))
+	i--
+	dAtA[i] = 0x3a
+	i -= len(m.Namespace)
+	copy(dAtA[i:], m.Namespace)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
+	i--
+	dAtA[i] = 0x32
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0x2a
+	i -= len(m.SubResource)
+	copy(dAtA[i:], m.SubResource)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubResource)))
+	i--
+	dAtA[i] = 0x22
+	{
+		size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	{
+		size, err := m.Kind.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.UID)
+	copy(dAtA[i:], m.UID)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *AdmissionResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AdmissionResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AdmissionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.AuditAnnotations) > 0 {
+		keysForAuditAnnotations := make([]string, 0, len(m.AuditAnnotations))
+		for k := range m.AuditAnnotations {
+			keysForAuditAnnotations = append(keysForAuditAnnotations, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations)
+		for iNdEx := len(keysForAuditAnnotations) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.AuditAnnotations[string(keysForAuditAnnotations[iNdEx])]
+			baseI := i
+			i -= len(v)
+			copy(dAtA[i:], v)
+			i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForAuditAnnotations[iNdEx])
+			copy(dAtA[i:], keysForAuditAnnotations[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAuditAnnotations[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x32
+		}
+	}
+	if m.PatchType != nil {
+		i -= len(*m.PatchType)
+		copy(dAtA[i:], *m.PatchType)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PatchType)))
+		i--
+		dAtA[i] = 0x2a
+	}
+	if m.Patch != nil {
+		i -= len(m.Patch)
+		copy(dAtA[i:], m.Patch)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(m.Patch)))
+		i--
+		dAtA[i] = 0x22
+	}
+	if m.Result != nil {
+		{
+			size, err := m.Result.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	i--
+	if m.Allowed {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x10
+	i -= len(m.UID)
+	copy(dAtA[i:], m.UID)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *AdmissionReview) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AdmissionReview) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AdmissionReview) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Response != nil {
+		{
+			size, err := m.Response.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.Request != nil {
+		{
+			size, err := m.Request.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
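+// encodeVarintGenerated writes v as a base-128 varint into the bytes just
+// before offset and returns the index of its first byte; the marshallers fill
+// dAtA back to front, so each length prefix lands immediately ahead of the
+// data it describes. For example, v = 300 occupies sovGenerated(300) == 2
+// bytes and encodes as 0xac, 0x02.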
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+	offset -= sovGenerated(v)
+	base := offset
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return base
+}
+func (m *AdmissionRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.UID)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Kind.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Resource.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.SubResource)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Namespace)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Operation)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.UserInfo.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Object.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.OldObject.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.DryRun != nil {
+		n += 2
+	}
+	l = m.Options.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.RequestKind != nil {
+		l = m.RequestKind.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.RequestResource != nil {
+		l = m.RequestResource.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	l = len(m.RequestSubResource)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *AdmissionResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.UID)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 2
+	if m.Result != nil {
+		l = m.Result.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.Patch != nil {
+		l = len(m.Patch)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.PatchType != nil {
+		l = len(*m.PatchType)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if len(m.AuditAnnotations) > 0 {
+		for k, v := range m.AuditAnnotations {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	return n
+}
+
+func (m *AdmissionReview) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Request != nil {
+		l = m.Request.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.Response != nil {
+		l = m.Response.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func sovGenerated(x uint64) (n int) {
+	return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+	return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *AdmissionRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&AdmissionRequest{`,
+		`UID:` + fmt.Sprintf("%v", this.UID) + `,`,
+		`Kind:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Kind), "GroupVersionKind", "v1.GroupVersionKind", 1), `&`, ``, 1) + `,`,
+		`Resource:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resource), "GroupVersionResource", "v1.GroupVersionResource", 1), `&`, ``, 1) + `,`,
+		`SubResource:` + fmt.Sprintf("%v", this.SubResource) + `,`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
+		`Operation:` + fmt.Sprintf("%v", this.Operation) + `,`,
+		`UserInfo:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.UserInfo), "UserInfo", "v11.UserInfo", 1), `&`, ``, 1) + `,`,
+		`Object:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Object), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`,
+		`OldObject:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.OldObject), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`,
+		`DryRun:` + valueToStringGenerated(this.DryRun) + `,`,
+		`Options:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Options), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`,
+		`RequestKind:` + strings.Replace(fmt.Sprintf("%v", this.RequestKind), "GroupVersionKind", "v1.GroupVersionKind", 1) + `,`,
+		`RequestResource:` + strings.Replace(fmt.Sprintf("%v", this.RequestResource), "GroupVersionResource", "v1.GroupVersionResource", 1) + `,`,
+		`RequestSubResource:` + fmt.Sprintf("%v", this.RequestSubResource) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *AdmissionResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	keysForAuditAnnotations := make([]string, 0, len(this.AuditAnnotations))
+	for k := range this.AuditAnnotations {
+		keysForAuditAnnotations = append(keysForAuditAnnotations, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations)
+	mapStringForAuditAnnotations := "map[string]string{"
+	for _, k := range keysForAuditAnnotations {
+		mapStringForAuditAnnotations += fmt.Sprintf("%v: %v,", k, this.AuditAnnotations[k])
+	}
+	mapStringForAuditAnnotations += "}"
+	s := strings.Join([]string{`&AdmissionResponse{`,
+		`UID:` + fmt.Sprintf("%v", this.UID) + `,`,
+		`Allowed:` + fmt.Sprintf("%v", this.Allowed) + `,`,
+		`Result:` + strings.Replace(fmt.Sprintf("%v", this.Result), "Status", "v1.Status", 1) + `,`,
+		`Patch:` + valueToStringGenerated(this.Patch) + `,`,
+		`PatchType:` + valueToStringGenerated(this.PatchType) + `,`,
+		`AuditAnnotations:` + mapStringForAuditAnnotations + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *AdmissionReview) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&AdmissionReview{`,
+		`Request:` + strings.Replace(this.Request.String(), "AdmissionRequest", "AdmissionRequest", 1) + `,`,
+		`Response:` + strings.Replace(this.Response.String(), "AdmissionResponse", "AdmissionResponse", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func valueToStringGenerated(v interface{}) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("*%v", pv)
+}
+func (m *AdmissionRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AdmissionRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AdmissionRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Kind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SubResource", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.SubResource = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Namespace = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Operation", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Operation = Operation(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field UserInfo", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.UserInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field OldObject", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.OldObject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 11:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.DryRun = &b
+		case 12:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 13:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RequestKind", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.RequestKind == nil {
+				m.RequestKind = &v1.GroupVersionKind{}
+			}
+			if err := m.RequestKind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 14:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RequestResource", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.RequestResource == nil {
+				m.RequestResource = &v1.GroupVersionResource{}
+			}
+			if err := m.RequestResource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 15:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RequestSubResource", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.RequestSubResource = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AdmissionResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AdmissionResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AdmissionResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Allowed", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Allowed = bool(v != 0)
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Result == nil {
+				m.Result = &v1.Status{}
+			}
+			if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Patch", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Patch = append(m.Patch[:0], dAtA[iNdEx:postIndex]...)
+			if m.Patch == nil {
+				m.Patch = []byte{}
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PatchType", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := PatchType(dAtA[iNdEx:postIndex])
+			m.PatchType = &s
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuditAnnotations", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuditAnnotations == nil {
+				m.AuditAnnotations = make(map[string]string)
+			}
+			var mapkey string
+			var mapvalue string
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var stringLenmapvalue uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapvalue |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapvalue := int(stringLenmapvalue)
+					if intStringLenmapvalue < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapvalue > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+					iNdEx = postStringIndexmapvalue
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if skippy < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.AuditAnnotations[mapkey] = mapvalue
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AdmissionReview) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AdmissionReview: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AdmissionReview: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Request == nil {
+				m.Request = &AdmissionRequest{}
+			}
+			if err := m.Request.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Response", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Response == nil {
+				m.Response = &AdmissionResponse{}
+			}
+			if err := m.Response.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipGenerated(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	depth := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+		case 1:
+			iNdEx += 8
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthGenerated
+			}
+			iNdEx += length
+		case 3:
+			depth++
+		case 4:
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
+		case 5:
+			iNdEx += 4
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
+	}
+	return 0, io.ErrUnexpectedEOF
+}
+
+var (
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
+)
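The generated code above sizes each message first, then writes it back-to-front with MarshalToSizedBuffer, emitting a hard-coded tag byte per field. A minimal round-trip sketch (not part of this patch; the import alias and UID value are illustrative) exercising the generated Marshal/Unmarshal pair:

package main

import (
	"fmt"

	admissionv1beta1 "k8s.io/api/admission/v1beta1"
	"k8s.io/apimachinery/pkg/types"
)

func main() {
	in := admissionv1beta1.AdmissionReview{
		Response: &admissionv1beta1.AdmissionResponse{
			UID:     types.UID("705ab4f5-6393-11e8-b7cc-42010a800002"), // illustrative UID
			Allowed: true,
		},
	}

	// Marshal calls Size() to allocate the buffer, then MarshalToSizedBuffer fills it from the end.
	data, err := in.Marshal()
	if err != nil {
		panic(err)
	}

	var out admissionv1beta1.AdmissionReview
	if err := out.Unmarshal(data); err != nil {
		panic(err)
	}
	fmt.Println(out.Response.UID, out.Response.Allowed) // same values after the round trip
}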
diff --git a/vendor/k8s.io/api/admission/v1beta1/generated.proto b/vendor/k8s.io/api/admission/v1beta1/generated.proto
new file mode 100644
index 00000000..6999b80c
--- /dev/null
+++ b/vendor/k8s.io/api/admission/v1beta1/generated.proto
@@ -0,0 +1,160 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.api.admission.v1beta1;
+
+import "k8s.io/api/authentication/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1beta1";
+
+// AdmissionRequest describes the admission.Attributes for the admission request.
+message AdmissionRequest {
+  // UID is an identifier for the individual request/response. It allows us to distinguish instances of requests which are
+  // otherwise identical (parallel requests, requests when earlier requests did not modify etc)
+  // The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request.
+  // It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging.
+  optional string uid = 1;
+
+  // Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale)
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind kind = 2;
+
+  // Resource is the fully-qualified resource being requested (for example, v1.pods)
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource resource = 3;
+
+  // SubResource is the subresource being requested, if any (for example, "status" or "scale")
+  // +optional
+  optional string subResource = 4;
+
+  // RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale).
+  // If this is specified and differs from the value in "kind", an equivalent match and conversion was performed.
+  //
+  // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of
+  // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`,
+  // an API request to apps/v1beta1 deployments would be converted and sent to the webhook
+  // with `kind: {group:"apps", version:"v1", kind:"Deployment"}` (matching the rule the webhook registered for),
+  // and `requestKind: {group:"apps", version:"v1beta1", kind:"Deployment"}` (indicating the kind of the original API request).
+  //
+  // See documentation for the "matchPolicy" field in the webhook configuration type for more details.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind requestKind = 13;
+
+  // RequestResource is the fully-qualified resource of the original API request (for example, v1.pods).
+  // If this is specified and differs from the value in "resource", an equivalent match and conversion was performed.
+  //
+  // For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of
+  // `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`,
+  // an API request to apps/v1beta1 deployments would be converted and sent to the webhook
+  // with `resource: {group:"apps", version:"v1", resource:"deployments"}` (matching the resource the webhook registered for),
+  // and `requestResource: {group:"apps", version:"v1beta1", resource:"deployments"}` (indicating the resource of the original API request).
+  //
+  // See documentation for the "matchPolicy" field in the webhook configuration type.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource requestResource = 14;
+
+  // RequestSubResource is the name of the subresource of the original API request, if any (for example, "status" or "scale")
+  // If this is specified and differs from the value in "subResource", an equivalent match and conversion was performed.
+  // See documentation for the "matchPolicy" field in the webhook configuration type.
+  // +optional
+  optional string requestSubResource = 15;
+
+  // Name is the name of the object as presented in the request.  On a CREATE operation, the client may omit name and
+  // rely on the server to generate the name.  If that is the case, this field will contain an empty string.
+  // +optional
+  optional string name = 5;
+
+  // Namespace is the namespace associated with the request (if any).
+  // +optional
+  optional string namespace = 6;
+
+  // Operation is the operation being performed. This may be different than the operation
+  // requested. e.g. a patch can result in either a CREATE or UPDATE Operation.
+  optional string operation = 7;
+
+  // UserInfo is information about the requesting user
+  optional k8s.io.api.authentication.v1.UserInfo userInfo = 8;
+
+  // Object is the object from the incoming request.
+  // +optional
+  optional k8s.io.apimachinery.pkg.runtime.RawExtension object = 9;
+
+  // OldObject is the existing object. Only populated for DELETE and UPDATE requests.
+  // +optional
+  optional k8s.io.apimachinery.pkg.runtime.RawExtension oldObject = 10;
+
+  // DryRun indicates that modifications will definitely not be persisted for this request.
+  // Defaults to false.
+  // +optional
+  optional bool dryRun = 11;
+
+  // Options is the operation option structure of the operation being performed.
+  // e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be
+  // different than the options the caller provided. e.g. for a patch request the performed
+  // Operation might be a CREATE, in which case the Options will be a
+  // `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`.
+  // +optional
+  optional k8s.io.apimachinery.pkg.runtime.RawExtension options = 12;
+}
+
+// AdmissionResponse describes an admission response.
+message AdmissionResponse {
+  // UID is an identifier for the individual request/response.
+  // This should be copied over from the corresponding AdmissionRequest.
+  optional string uid = 1;
+
+  // Allowed indicates whether or not the admission request was permitted.
+  optional bool allowed = 2;
+
+  // Result contains extra details into why an admission request was denied.
+  // This field IS NOT consulted in any way if "Allowed" is "true".
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Status status = 3;
+
+  // The patch body. Currently we only support "JSONPatch" which implements RFC 6902.
+  // +optional
+  optional bytes patch = 4;
+
+  // The type of Patch. Currently we only allow "JSONPatch".
+  // +optional
+  optional string patchType = 5;
+
+  // AuditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted).
+  // MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controller will prefix the keys with
+  // admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by
+  // the admission webhook to add additional context to the audit log for this request.
+  // +optional
+  map<string, string> auditAnnotations = 6;
+}
+
+// AdmissionReview describes an admission review request/response.
+message AdmissionReview {
+  // Request describes the attributes for the admission request.
+  // +optional
+  optional AdmissionRequest request = 1;
+
+  // Response describes the attributes for the admission response.
+  // +optional
+  optional AdmissionResponse response = 2;
+}
+
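The field numbers in this schema are what produce the literal tag bytes in the generated marshaller (tag = fieldNumber<<3 | wireType). A small sketch of that mapping, assuming only the standard protobuf wire format:

package main

import "fmt"

// wire types used by this package: 0 = varint (bool), 2 = length-delimited (strings, messages, bytes, maps)
const (
	wireVarint = 0
	wireBytes  = 2
)

func tag(fieldNumber, wireType int) byte {
	return byte(fieldNumber<<3 | wireType)
}

func main() {
	// These match the constants written by the generated AdmissionResponse marshaller.
	fmt.Printf("uid     -> %#x\n", tag(1, wireBytes))  // 0xa
	fmt.Printf("allowed -> %#x\n", tag(2, wireVarint)) // 0x10
	fmt.Printf("status  -> %#x\n", tag(3, wireBytes))  // 0x1a
	fmt.Printf("patch   -> %#x\n", tag(4, wireBytes))  // 0x22
}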
diff --git a/vendor/k8s.io/api/admission/v1beta1/register.go b/vendor/k8s.io/api/admission/v1beta1/register.go
new file mode 100644
index 00000000..78d21a0c
--- /dev/null
+++ b/vendor/k8s.io/api/admission/v1beta1/register.go
@@ -0,0 +1,51 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name for this API.
+const GroupName = "admission.k8s.io"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+	SchemeBuilder      = runtime.NewSchemeBuilder(addKnownTypes)
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&AdmissionReview{},
+	)
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
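AddToScheme is typically consumed by a codec; a minimal sketch (standard apimachinery serializer calls, illustrative JSON payload) of decoding an AdmissionReview once the type is registered:

package main

import (
	"fmt"

	admissionv1beta1 "k8s.io/api/admission/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
)

func main() {
	scheme := runtime.NewScheme()
	// addKnownTypes above registers AdmissionReview under admission.k8s.io/v1beta1.
	if err := admissionv1beta1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	body := []byte(`{"apiVersion":"admission.k8s.io/v1beta1","kind":"AdmissionReview","request":{"uid":"abc","operation":"CREATE"}}`)

	decoder := serializer.NewCodecFactory(scheme).UniversalDeserializer()
	obj, gvk, err := decoder.Decode(body, nil, nil)
	if err != nil {
		panic(err)
	}
	review := obj.(*admissionv1beta1.AdmissionReview)
	fmt.Println(gvk, review.Request.UID, review.Request.Operation)
}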
diff --git a/vendor/k8s.io/api/admission/v1beta1/types.go b/vendor/k8s.io/api/admission/v1beta1/types.go
new file mode 100644
index 00000000..2cb9ea55
--- /dev/null
+++ b/vendor/k8s.io/api/admission/v1beta1/types.go
@@ -0,0 +1,162 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	authenticationv1 "k8s.io/api/authentication/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// AdmissionReview describes an admission review request/response.
+type AdmissionReview struct {
+	metav1.TypeMeta `json:",inline"`
+	// Request describes the attributes for the admission request.
+	// +optional
+	Request *AdmissionRequest `json:"request,omitempty" protobuf:"bytes,1,opt,name=request"`
+	// Response describes the attributes for the admission response.
+	// +optional
+	Response *AdmissionResponse `json:"response,omitempty" protobuf:"bytes,2,opt,name=response"`
+}
+
+// AdmissionRequest describes the admission.Attributes for the admission request.
+type AdmissionRequest struct {
+	// UID is an identifier for the individual request/response. It allows us to distinguish instances of requests which are
+	// otherwise identical (parallel requests, requests when earlier requests did not modify etc)
+	// The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request.
+	// It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging.
+	UID types.UID `json:"uid" protobuf:"bytes,1,opt,name=uid"`
+	// Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale)
+	Kind metav1.GroupVersionKind `json:"kind" protobuf:"bytes,2,opt,name=kind"`
+	// Resource is the fully-qualified resource being requested (for example, v1.pods)
+	Resource metav1.GroupVersionResource `json:"resource" protobuf:"bytes,3,opt,name=resource"`
+	// SubResource is the subresource being requested, if any (for example, "status" or "scale")
+	// +optional
+	SubResource string `json:"subResource,omitempty" protobuf:"bytes,4,opt,name=subResource"`
+
+	// RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale).
+	// If this is specified and differs from the value in "kind", an equivalent match and conversion was performed.
+	//
+	// For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of
+	// `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`,
+	// an API request to apps/v1beta1 deployments would be converted and sent to the webhook
+	// with `kind: {group:"apps", version:"v1", kind:"Deployment"}` (matching the rule the webhook registered for),
+	// and `requestKind: {group:"apps", version:"v1beta1", kind:"Deployment"}` (indicating the kind of the original API request).
+	//
+	// See documentation for the "matchPolicy" field in the webhook configuration type for more details.
+	// +optional
+	RequestKind *metav1.GroupVersionKind `json:"requestKind,omitempty" protobuf:"bytes,13,opt,name=requestKind"`
+	// RequestResource is the fully-qualified resource of the original API request (for example, v1.pods).
+	// If this is specified and differs from the value in "resource", an equivalent match and conversion was performed.
+	//
+	// For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of
+	// `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`,
+	// an API request to apps/v1beta1 deployments would be converted and sent to the webhook
+	// with `resource: {group:"apps", version:"v1", resource:"deployments"}` (matching the resource the webhook registered for),
+	// and `requestResource: {group:"apps", version:"v1beta1", resource:"deployments"}` (indicating the resource of the original API request).
+	//
+	// See documentation for the "matchPolicy" field in the webhook configuration type.
+	// +optional
+	RequestResource *metav1.GroupVersionResource `json:"requestResource,omitempty" protobuf:"bytes,14,opt,name=requestResource"`
+	// RequestSubResource is the name of the subresource of the original API request, if any (for example, "status" or "scale")
+	// If this is specified and differs from the value in "subResource", an equivalent match and conversion was performed.
+	// See documentation for the "matchPolicy" field in the webhook configuration type.
+	// +optional
+	RequestSubResource string `json:"requestSubResource,omitempty" protobuf:"bytes,15,opt,name=requestSubResource"`
+
+	// Name is the name of the object as presented in the request.  On a CREATE operation, the client may omit name and
+	// rely on the server to generate the name.  If that is the case, this field will contain an empty string.
+	// +optional
+	Name string `json:"name,omitempty" protobuf:"bytes,5,opt,name=name"`
+	// Namespace is the namespace associated with the request (if any).
+	// +optional
+	Namespace string `json:"namespace,omitempty" protobuf:"bytes,6,opt,name=namespace"`
+	// Operation is the operation being performed. This may be different than the operation
+	// requested. e.g. a patch can result in either a CREATE or UPDATE Operation.
+	Operation Operation `json:"operation" protobuf:"bytes,7,opt,name=operation"`
+	// UserInfo is information about the requesting user
+	UserInfo authenticationv1.UserInfo `json:"userInfo" protobuf:"bytes,8,opt,name=userInfo"`
+	// Object is the object from the incoming request.
+	// +optional
+	Object runtime.RawExtension `json:"object,omitempty" protobuf:"bytes,9,opt,name=object"`
+	// OldObject is the existing object. Only populated for DELETE and UPDATE requests.
+	// +optional
+	OldObject runtime.RawExtension `json:"oldObject,omitempty" protobuf:"bytes,10,opt,name=oldObject"`
+	// DryRun indicates that modifications will definitely not be persisted for this request.
+	// Defaults to false.
+	// +optional
+	DryRun *bool `json:"dryRun,omitempty" protobuf:"varint,11,opt,name=dryRun"`
+	// Options is the operation option structure of the operation being performed.
+	// e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be
+	// different than the options the caller provided. e.g. for a patch request the performed
+	// Operation might be a CREATE, in which case the Options will be a
+	// `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`.
+	// +optional
+	Options runtime.RawExtension `json:"options,omitempty" protobuf:"bytes,12,opt,name=options"`
+}
+
+// AdmissionResponse describes an admission response.
+type AdmissionResponse struct {
+	// UID is an identifier for the individual request/response.
+	// This should be copied over from the corresponding AdmissionRequest.
+	UID types.UID `json:"uid" protobuf:"bytes,1,opt,name=uid"`
+
+	// Allowed indicates whether or not the admission request was permitted.
+	Allowed bool `json:"allowed" protobuf:"varint,2,opt,name=allowed"`
+
+	// Result contains extra details into why an admission request was denied.
+	// This field IS NOT consulted in any way if "Allowed" is "true".
+	// +optional
+	Result *metav1.Status `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+
+	// The patch body. Currently we only support "JSONPatch" which implements RFC 6902.
+	// +optional
+	Patch []byte `json:"patch,omitempty" protobuf:"bytes,4,opt,name=patch"`
+
+	// The type of Patch. Currently we only allow "JSONPatch".
+	// +optional
+	PatchType *PatchType `json:"patchType,omitempty" protobuf:"bytes,5,opt,name=patchType"`
+
+	// AuditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted).
+	// MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controller will prefix the keys with
+	// admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by
+	// the admission webhook to add additional context to the audit log for this request.
+	// +optional
+	AuditAnnotations map[string]string `json:"auditAnnotations,omitempty" protobuf:"bytes,6,opt,name=auditAnnotations"`
+}
+
+// PatchType is the type of patch being used to represent the mutated object
+type PatchType string
+
+// PatchType constants.
+const (
+	PatchTypeJSONPatch PatchType = "JSONPatch"
+)
+
+// Operation is the type of resource operation being checked for admission control
+type Operation string
+
+// Operation constants
+const (
+	Create  Operation = "CREATE"
+	Update  Operation = "UPDATE"
+	Delete  Operation = "DELETE"
+	Connect Operation = "CONNECT"
+)
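These are the types an admission webhook decodes from the API server and encodes back. A bare-bones handler sketch (plain encoding/json, hypothetical /validate path, TLS and error handling trimmed) that copies the request UID into an allowing response:

package main

import (
	"encoding/json"
	"net/http"

	admissionv1beta1 "k8s.io/api/admission/v1beta1"
)

func serveAdmission(w http.ResponseWriter, r *http.Request) {
	var review admissionv1beta1.AdmissionReview
	if err := json.NewDecoder(r.Body).Decode(&review); err != nil || review.Request == nil {
		http.Error(w, "malformed AdmissionReview", http.StatusBadRequest)
		return
	}

	// The response must carry the UID of the request it answers.
	review.Response = &admissionv1beta1.AdmissionResponse{
		UID:     review.Request.UID,
		Allowed: true,
	}
	review.Request = nil

	w.Header().Set("Content-Type", "application/json")
	_ = json.NewEncoder(w).Encode(&review)
}

func main() {
	http.HandleFunc("/validate", serveAdmission) // hypothetical path; a real webhook server terminates TLS
	_ = http.ListenAndServe(":8443", nil)
}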
diff --git a/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go
new file mode 100644
index 00000000..2ef98db8
--- /dev/null
+++ b/vendor/k8s.io/api/admission/v1beta1/types_swagger_doc_generated.go
@@ -0,0 +1,77 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_AdmissionRequest = map[string]string{
+	"":                   "AdmissionRequest describes the admission.Attributes for the admission request.",
+	"uid":                "UID is an identifier for the individual request/response. It allows us to distinguish instances of requests which are otherwise identical (parallel requests, requests when earlier requests did not modify etc) The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging.",
+	"kind":               "Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale)",
+	"resource":           "Resource is the fully-qualified resource being requested (for example, v1.pods)",
+	"subResource":        "SubResource is the subresource being requested, if any (for example, \"status\" or \"scale\")",
+	"requestKind":        "RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale). If this is specified and differs from the value in \"kind\", an equivalent match and conversion was performed.\n\nFor example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]` and `matchPolicy: Equivalent`, an API request to apps/v1beta1 deployments would be converted and sent to the webhook with `kind: {group:\"apps\", version:\"v1\", kind:\"Deployment\"}` (matching the rule the webhook registered for), and `requestKind: {group:\"apps\", version:\"v1beta1\", kind:\"Deployment\"}` (indicating the kind of the original API request).\n\nSee documentation for the \"matchPolicy\" field in the webhook configuration type for more details.",
+	"requestResource":    "RequestResource is the fully-qualified resource of the original API request (for example, v1.pods). If this is specified and differs from the value in \"resource\", an equivalent match and conversion was performed.\n\nFor example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]` and `matchPolicy: Equivalent`, an API request to apps/v1beta1 deployments would be converted and sent to the webhook with `resource: {group:\"apps\", version:\"v1\", resource:\"deployments\"}` (matching the resource the webhook registered for), and `requestResource: {group:\"apps\", version:\"v1beta1\", resource:\"deployments\"}` (indicating the resource of the original API request).\n\nSee documentation for the \"matchPolicy\" field in the webhook configuration type.",
+	"requestSubResource": "RequestSubResource is the name of the subresource of the original API request, if any (for example, \"status\" or \"scale\") If this is specified and differs from the value in \"subResource\", an equivalent match and conversion was performed. See documentation for the \"matchPolicy\" field in the webhook configuration type.",
+	"name":               "Name is the name of the object as presented in the request.  On a CREATE operation, the client may omit name and rely on the server to generate the name.  If that is the case, this field will contain an empty string.",
+	"namespace":          "Namespace is the namespace associated with the request (if any).",
+	"operation":          "Operation is the operation being performed. This may be different than the operation requested. e.g. a patch can result in either a CREATE or UPDATE Operation.",
+	"userInfo":           "UserInfo is information about the requesting user",
+	"object":             "Object is the object from the incoming request.",
+	"oldObject":          "OldObject is the existing object. Only populated for DELETE and UPDATE requests.",
+	"dryRun":             "DryRun indicates that modifications will definitely not be persisted for this request. Defaults to false.",
+	"options":            "Options is the operation option structure of the operation being performed. e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be different than the options the caller provided. e.g. for a patch request the performed Operation might be a CREATE, in which case the Options will be a `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`.",
+}
+
+func (AdmissionRequest) SwaggerDoc() map[string]string {
+	return map_AdmissionRequest
+}
+
+var map_AdmissionResponse = map[string]string{
+	"":                 "AdmissionResponse describes an admission response.",
+	"uid":              "UID is an identifier for the individual request/response. This should be copied over from the corresponding AdmissionRequest.",
+	"allowed":          "Allowed indicates whether or not the admission request was permitted.",
+	"status":           "Result contains extra details into why an admission request was denied. This field IS NOT consulted in any way if \"Allowed\" is \"true\".",
+	"patch":            "The patch body. Currently we only support \"JSONPatch\" which implements RFC 6902.",
+	"patchType":        "The type of Patch. Currently we only allow \"JSONPatch\".",
+	"auditAnnotations": "AuditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted). MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controller will prefix the keys with admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by the admission webhook to add additional context to the audit log for this request.",
+}
+
+func (AdmissionResponse) SwaggerDoc() map[string]string {
+	return map_AdmissionResponse
+}
+
+var map_AdmissionReview = map[string]string{
+	"":         "AdmissionReview describes an admission review request/response.",
+	"request":  "Request describes the attributes for the admission request.",
+	"response": "Response describes the attributes for the admission response.",
+}
+
+func (AdmissionReview) SwaggerDoc() map[string]string {
+	return map_AdmissionReview
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
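The maps above are keyed by JSON field name, with the empty key holding the type-level description; a trivial sketch of the lookup:

package main

import (
	"fmt"

	admissionv1beta1 "k8s.io/api/admission/v1beta1"
)

func main() {
	docs := (admissionv1beta1.AdmissionResponse{}).SwaggerDoc()
	fmt.Println(docs[""])        // type-level description
	fmt.Println(docs["allowed"]) // per-field description keyed by JSON name
}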
diff --git a/vendor/k8s.io/api/admission/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admission/v1beta1/zz_generated.deepcopy.go
new file mode 100644
index 00000000..e4704c86
--- /dev/null
+++ b/vendor/k8s.io/api/admission/v1beta1/zz_generated.deepcopy.go
@@ -0,0 +1,136 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AdmissionRequest) DeepCopyInto(out *AdmissionRequest) {
+	*out = *in
+	out.Kind = in.Kind
+	out.Resource = in.Resource
+	if in.RequestKind != nil {
+		in, out := &in.RequestKind, &out.RequestKind
+		*out = new(v1.GroupVersionKind)
+		**out = **in
+	}
+	if in.RequestResource != nil {
+		in, out := &in.RequestResource, &out.RequestResource
+		*out = new(v1.GroupVersionResource)
+		**out = **in
+	}
+	in.UserInfo.DeepCopyInto(&out.UserInfo)
+	in.Object.DeepCopyInto(&out.Object)
+	in.OldObject.DeepCopyInto(&out.OldObject)
+	if in.DryRun != nil {
+		in, out := &in.DryRun, &out.DryRun
+		*out = new(bool)
+		**out = **in
+	}
+	in.Options.DeepCopyInto(&out.Options)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionRequest.
+func (in *AdmissionRequest) DeepCopy() *AdmissionRequest {
+	if in == nil {
+		return nil
+	}
+	out := new(AdmissionRequest)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AdmissionResponse) DeepCopyInto(out *AdmissionResponse) {
+	*out = *in
+	if in.Result != nil {
+		in, out := &in.Result, &out.Result
+		*out = new(v1.Status)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Patch != nil {
+		in, out := &in.Patch, &out.Patch
+		*out = make([]byte, len(*in))
+		copy(*out, *in)
+	}
+	if in.PatchType != nil {
+		in, out := &in.PatchType, &out.PatchType
+		*out = new(PatchType)
+		**out = **in
+	}
+	if in.AuditAnnotations != nil {
+		in, out := &in.AuditAnnotations, &out.AuditAnnotations
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionResponse.
+func (in *AdmissionResponse) DeepCopy() *AdmissionResponse {
+	if in == nil {
+		return nil
+	}
+	out := new(AdmissionResponse)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AdmissionReview) DeepCopyInto(out *AdmissionReview) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.Request != nil {
+		in, out := &in.Request, &out.Request
+		*out = new(AdmissionRequest)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Response != nil {
+		in, out := &in.Response, &out.Response
+		*out = new(AdmissionResponse)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionReview.
+func (in *AdmissionReview) DeepCopy() *AdmissionReview {
+	if in == nil {
+		return nil
+	}
+	out := new(AdmissionReview)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AdmissionReview) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
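Illustrative sketch, not part of the vendored patch: the generated deepcopy functions above exist so that a copy owns its own pointer-backed fields. A minimal usage example, assuming only the vendored admission package:

package main

import (
	"fmt"

	admissionv1beta1 "k8s.io/api/admission/v1beta1"
)

func main() {
	dryRun := true
	orig := &admissionv1beta1.AdmissionRequest{Name: "demo", DryRun: &dryRun}

	cp := orig.DeepCopy()
	*cp.DryRun = false // the copy got its own *bool, so the original is untouched

	fmt.Println(*orig.DryRun, *cp.DryRun) // true false
}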
diff --git a/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go
index 1acb6345..adc47be7 100644
--- a/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go
+++ b/vendor/k8s.io/api/admissionregistration/v1/generated.pb.go
@@ -42,7 +42,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 func (m *MutatingWebhook) Reset()      { *m = MutatingWebhook{} }
 func (*MutatingWebhook) ProtoMessage() {}
@@ -3360,6 +3360,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error {
 func skipGenerated(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
+	depth := 0
 	for iNdEx < l {
 		var wire uint64
 		for shift := uint(0); ; shift += 7 {
@@ -3391,10 +3392,8 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			return iNdEx, nil
 		case 1:
 			iNdEx += 8
-			return iNdEx, nil
 		case 2:
 			var length int
 			for shift := uint(0); ; shift += 7 {
@@ -3415,55 +3414,30 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 				return 0, ErrInvalidLengthGenerated
 			}
 			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthGenerated
-			}
-			return iNdEx, nil
 		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowGenerated
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipGenerated(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthGenerated
-				}
-			}
-			return iNdEx, nil
+			depth++
 		case 4:
-			return iNdEx, nil
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
 		case 5:
 			iNdEx += 4
-			return iNdEx, nil
 		default:
 			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
 		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
 	}
-	panic("unreachable")
+	return 0, io.ErrUnexpectedEOF
 }
 
 var (
-	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
 )
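Illustrative sketch, not part of the vendored patch: the regenerated skipGenerated above (and its identical copies in the other generated.pb.go files in this patch) replaces the old per-case early returns and recursive group handling with a single iterative depth counter. The standalone sketch below shows the same idea with locally defined names; the varint decoder and error values are this example's own, not the vendored ones.

package main

import (
	"errors"
	"fmt"
)

var errTruncated = errors.New("truncated input")

// skipField consumes one top-level field, returning how many bytes were skipped.
// Wire type 3 (start group) bumps depth, wire type 4 (end group) pops it, and the
// field only ends once depth returns to zero — nested groups need no recursion.
func skipField(b []byte) (int, error) {
	i, depth := 0, 0
	for i < len(b) {
		tag, n := decodeVarint(b[i:])
		if n == 0 {
			return 0, errTruncated
		}
		i += n
		switch tag & 0x7 { // low three bits are the wire type
		case 0: // varint
			_, n := decodeVarint(b[i:])
			if n == 0 {
				return 0, errTruncated
			}
			i += n
		case 1: // fixed 64-bit
			i += 8
		case 2: // length-delimited
			l, n := decodeVarint(b[i:])
			if n == 0 {
				return 0, errTruncated
			}
			i += n + int(l)
		case 3: // start group
			depth++
		case 4: // end group
			if depth == 0 {
				return 0, errors.New("unexpected end of group")
			}
			depth--
		case 5: // fixed 32-bit
			i += 4
		default:
			return 0, fmt.Errorf("illegal wire type %d", tag&0x7)
		}
		if i > len(b) {
			return 0, errTruncated
		}
		if depth == 0 {
			return i, nil
		}
	}
	return 0, errTruncated
}

// decodeVarint returns the value and the number of bytes consumed (0 on truncation).
func decodeVarint(b []byte) (uint64, int) {
	var v uint64
	for i := 0; i < len(b) && i < 10; i++ {
		v |= uint64(b[i]&0x7f) << (7 * uint(i))
		if b[i] < 0x80 {
			return v, i + 1
		}
	}
	return 0, 0
}

func main() {
	// Field 1, wire type 0 (varint), value 150: tag byte 0x08, payload 0x96 0x01.
	n, err := skipField([]byte{0x08, 0x96, 0x01})
	fmt.Println(n, err) // 3 <nil>
}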
diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go
index d84d8b63..c98aa747 100644
--- a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go
+++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go
@@ -42,7 +42,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 func (m *MutatingWebhook) Reset()      { *m = MutatingWebhook{} }
 func (*MutatingWebhook) ProtoMessage() {}
@@ -3361,6 +3361,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error {
 func skipGenerated(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
+	depth := 0
 	for iNdEx < l {
 		var wire uint64
 		for shift := uint(0); ; shift += 7 {
@@ -3392,10 +3393,8 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			return iNdEx, nil
 		case 1:
 			iNdEx += 8
-			return iNdEx, nil
 		case 2:
 			var length int
 			for shift := uint(0); ; shift += 7 {
@@ -3416,55 +3415,30 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 				return 0, ErrInvalidLengthGenerated
 			}
 			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthGenerated
-			}
-			return iNdEx, nil
 		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowGenerated
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipGenerated(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthGenerated
-				}
-			}
-			return iNdEx, nil
+			depth++
 		case 4:
-			return iNdEx, nil
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
 		case 5:
 			iNdEx += 4
-			return iNdEx, nil
 		default:
 			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
 		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
 	}
-	panic("unreachable")
+	return 0, io.ErrUnexpectedEOF
 }
 
 var (
-	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
 )
diff --git a/vendor/k8s.io/api/apps/v1/generated.pb.go b/vendor/k8s.io/api/apps/v1/generated.pb.go
index 425144d8..6ef25f50 100644
--- a/vendor/k8s.io/api/apps/v1/generated.pb.go
+++ b/vendor/k8s.io/api/apps/v1/generated.pb.go
@@ -46,7 +46,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 func (m *ControllerRevision) Reset()      { *m = ControllerRevision{} }
 func (*ControllerRevision) ProtoMessage() {}
@@ -8155,6 +8155,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error {
 func skipGenerated(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
+	depth := 0
 	for iNdEx < l {
 		var wire uint64
 		for shift := uint(0); ; shift += 7 {
@@ -8186,10 +8187,8 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			return iNdEx, nil
 		case 1:
 			iNdEx += 8
-			return iNdEx, nil
 		case 2:
 			var length int
 			for shift := uint(0); ; shift += 7 {
@@ -8210,55 +8209,30 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 				return 0, ErrInvalidLengthGenerated
 			}
 			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthGenerated
-			}
-			return iNdEx, nil
 		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowGenerated
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipGenerated(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthGenerated
-				}
-			}
-			return iNdEx, nil
+			depth++
 		case 4:
-			return iNdEx, nil
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
 		case 5:
 			iNdEx += 4
-			return iNdEx, nil
 		default:
 			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
 		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
 	}
-	panic("unreachable")
+	return 0, io.ErrUnexpectedEOF
 }
 
 var (
-	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
 )
diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.pb.go b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go
index 921e055c..f81b5590 100644
--- a/vendor/k8s.io/api/apps/v1beta1/generated.pb.go
+++ b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go
@@ -47,7 +47,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 func (m *ControllerRevision) Reset()      { *m = ControllerRevision{} }
 func (*ControllerRevision) ProtoMessage() {}
@@ -6163,6 +6163,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error {
 func skipGenerated(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
+	depth := 0
 	for iNdEx < l {
 		var wire uint64
 		for shift := uint(0); ; shift += 7 {
@@ -6194,10 +6195,8 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			return iNdEx, nil
 		case 1:
 			iNdEx += 8
-			return iNdEx, nil
 		case 2:
 			var length int
 			for shift := uint(0); ; shift += 7 {
@@ -6218,55 +6217,30 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 				return 0, ErrInvalidLengthGenerated
 			}
 			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthGenerated
-			}
-			return iNdEx, nil
 		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowGenerated
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipGenerated(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthGenerated
-				}
-			}
-			return iNdEx, nil
+			depth++
 		case 4:
-			return iNdEx, nil
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
 		case 5:
 			iNdEx += 4
-			return iNdEx, nil
 		default:
 			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
 		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
 	}
-	panic("unreachable")
+	return 0, io.ErrUnexpectedEOF
 }
 
 var (
-	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
 )
diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.pb.go b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go
index 624bb942..8a9f2005 100644
--- a/vendor/k8s.io/api/apps/v1beta2/generated.pb.go
+++ b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go
@@ -47,7 +47,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 func (m *ControllerRevision) Reset()      { *m = ControllerRevision{} }
 func (*ControllerRevision) ProtoMessage() {}
@@ -8931,6 +8931,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error {
 func skipGenerated(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
+	depth := 0
 	for iNdEx < l {
 		var wire uint64
 		for shift := uint(0); ; shift += 7 {
@@ -8962,10 +8963,8 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			return iNdEx, nil
 		case 1:
 			iNdEx += 8
-			return iNdEx, nil
 		case 2:
 			var length int
 			for shift := uint(0); ; shift += 7 {
@@ -8986,55 +8985,30 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 				return 0, ErrInvalidLengthGenerated
 			}
 			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthGenerated
-			}
-			return iNdEx, nil
 		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowGenerated
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipGenerated(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthGenerated
-				}
-			}
-			return iNdEx, nil
+			depth++
 		case 4:
-			return iNdEx, nil
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
 		case 5:
 			iNdEx += 4
-			return iNdEx, nil
 		default:
 			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
 		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
 	}
-	panic("unreachable")
+	return 0, io.ErrUnexpectedEOF
 }
 
 var (
-	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
 )
diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/generated.pb.go b/vendor/k8s.io/api/auditregistration/v1alpha1/generated.pb.go
index 003cc30b..f8eec3df 100644
--- a/vendor/k8s.io/api/auditregistration/v1alpha1/generated.pb.go
+++ b/vendor/k8s.io/api/auditregistration/v1alpha1/generated.pb.go
@@ -41,7 +41,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 func (m *AuditSink) Reset()      { *m = AuditSink{} }
 func (*AuditSink) ProtoMessage() {}
@@ -1947,6 +1947,7 @@ func (m *WebhookThrottleConfig) Unmarshal(dAtA []byte) error {
 func skipGenerated(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
+	depth := 0
 	for iNdEx < l {
 		var wire uint64
 		for shift := uint(0); ; shift += 7 {
@@ -1978,10 +1979,8 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			return iNdEx, nil
 		case 1:
 			iNdEx += 8
-			return iNdEx, nil
 		case 2:
 			var length int
 			for shift := uint(0); ; shift += 7 {
@@ -2002,55 +2001,30 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 				return 0, ErrInvalidLengthGenerated
 			}
 			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthGenerated
-			}
-			return iNdEx, nil
 		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowGenerated
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipGenerated(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthGenerated
-				}
-			}
-			return iNdEx, nil
+			depth++
 		case 4:
-			return iNdEx, nil
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
 		case 5:
 			iNdEx += 4
-			return iNdEx, nil
 		default:
 			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
 		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
 	}
-	panic("unreachable")
+	return 0, io.ErrUnexpectedEOF
 }
 
 var (
-	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
 )
diff --git a/vendor/k8s.io/api/authentication/v1/generated.pb.go b/vendor/k8s.io/api/authentication/v1/generated.pb.go
index 02be20de..6524f8ca 100644
--- a/vendor/k8s.io/api/authentication/v1/generated.pb.go
+++ b/vendor/k8s.io/api/authentication/v1/generated.pb.go
@@ -44,7 +44,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 func (m *BoundObjectReference) Reset()      { *m = BoundObjectReference{} }
 func (*BoundObjectReference) ProtoMessage() {}
@@ -2498,6 +2498,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error {
 func skipGenerated(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
+	depth := 0
 	for iNdEx < l {
 		var wire uint64
 		for shift := uint(0); ; shift += 7 {
@@ -2529,10 +2530,8 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			return iNdEx, nil
 		case 1:
 			iNdEx += 8
-			return iNdEx, nil
 		case 2:
 			var length int
 			for shift := uint(0); ; shift += 7 {
@@ -2553,55 +2552,30 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 				return 0, ErrInvalidLengthGenerated
 			}
 			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthGenerated
-			}
-			return iNdEx, nil
 		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowGenerated
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipGenerated(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthGenerated
-				}
-			}
-			return iNdEx, nil
+			depth++
 		case 4:
-			return iNdEx, nil
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
 		case 5:
 			iNdEx += 4
-			return iNdEx, nil
 		default:
 			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
 		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
 	}
-	panic("unreachable")
+	return 0, io.ErrUnexpectedEOF
 }
 
 var (
-	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
 )
diff --git a/vendor/k8s.io/api/authentication/v1/types.go b/vendor/k8s.io/api/authentication/v1/types.go
index c48b0369..668b7203 100644
--- a/vendor/k8s.io/api/authentication/v1/types.go
+++ b/vendor/k8s.io/api/authentication/v1/types.go
@@ -40,7 +40,7 @@ const (
 
 // +genclient
 // +genclient:nonNamespaced
-// +genclient:noVerbs
+// +genclient:onlyVerbs=create
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 
 // TokenReview attempts to authenticate a token to a known user.
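Illustrative sketch, not part of the vendored patch: the tag change above switches TokenReview from noVerbs to onlyVerbs=create, so client-gen now emits a Create method for it. A minimal sketch of the object a caller would submit; the client plumbing is assumed, not shown.

package main

import (
	"fmt"

	authnv1 "k8s.io/api/authentication/v1"
)

func main() {
	tr := &authnv1.TokenReview{
		Spec: authnv1.TokenReviewSpec{
			Token: "<bearer token to verify>",
		},
	}
	// With +genclient:onlyVerbs=create, the generated clientset exposes only a Create
	// verb for this type (e.g. client.AuthenticationV1().TokenReviews().Create(tr) in the
	// matching client-go release); the returned object's Status reports the result.
	fmt.Println(tr.Spec.Token != "")
}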
diff --git a/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go b/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go
index 0721bda8..6c391dbf 100644
--- a/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go
+++ b/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go
@@ -42,7 +42,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 func (m *ExtraValue) Reset()      { *m = ExtraValue{} }
 func (*ExtraValue) ProtoMessage() {}
@@ -1475,6 +1475,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error {
 func skipGenerated(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
+	depth := 0
 	for iNdEx < l {
 		var wire uint64
 		for shift := uint(0); ; shift += 7 {
@@ -1506,10 +1507,8 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			return iNdEx, nil
 		case 1:
 			iNdEx += 8
-			return iNdEx, nil
 		case 2:
 			var length int
 			for shift := uint(0); ; shift += 7 {
@@ -1530,55 +1529,30 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 				return 0, ErrInvalidLengthGenerated
 			}
 			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthGenerated
-			}
-			return iNdEx, nil
 		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowGenerated
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipGenerated(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthGenerated
-				}
-			}
-			return iNdEx, nil
+			depth++
 		case 4:
-			return iNdEx, nil
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
 		case 5:
 			iNdEx += 4
-			return iNdEx, nil
 		default:
 			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
 		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
 	}
-	panic("unreachable")
+	return 0, io.ErrUnexpectedEOF
 }
 
 var (
-	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
 )
diff --git a/vendor/k8s.io/api/authentication/v1beta1/types.go b/vendor/k8s.io/api/authentication/v1beta1/types.go
index 0b6cba82..0083fb0e 100644
--- a/vendor/k8s.io/api/authentication/v1beta1/types.go
+++ b/vendor/k8s.io/api/authentication/v1beta1/types.go
@@ -24,7 +24,7 @@ import (
 
 // +genclient
 // +genclient:nonNamespaced
-// +genclient:noVerbs
+// +genclient:onlyVerbs=create
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 
 // TokenReview attempts to authenticate a token to a known user.
diff --git a/vendor/k8s.io/api/authorization/v1/generated.pb.go b/vendor/k8s.io/api/authorization/v1/generated.pb.go
index 0dc01bc9..dbc0bdc7 100644
--- a/vendor/k8s.io/api/authorization/v1/generated.pb.go
+++ b/vendor/k8s.io/api/authorization/v1/generated.pb.go
@@ -42,7 +42,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 func (m *ExtraValue) Reset()      { *m = ExtraValue{} }
 func (*ExtraValue) ProtoMessage() {}
@@ -4004,6 +4004,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error {
 func skipGenerated(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
+	depth := 0
 	for iNdEx < l {
 		var wire uint64
 		for shift := uint(0); ; shift += 7 {
@@ -4035,10 +4036,8 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			return iNdEx, nil
 		case 1:
 			iNdEx += 8
-			return iNdEx, nil
 		case 2:
 			var length int
 			for shift := uint(0); ; shift += 7 {
@@ -4059,55 +4058,30 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 				return 0, ErrInvalidLengthGenerated
 			}
 			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthGenerated
-			}
-			return iNdEx, nil
 		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowGenerated
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipGenerated(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthGenerated
-				}
-			}
-			return iNdEx, nil
+			depth++
 		case 4:
-			return iNdEx, nil
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
 		case 5:
 			iNdEx += 4
-			return iNdEx, nil
 		default:
 			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
 		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
 	}
-	panic("unreachable")
+	return 0, io.ErrUnexpectedEOF
 }
 
 var (
-	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
 )
diff --git a/vendor/k8s.io/api/authorization/v1/types.go b/vendor/k8s.io/api/authorization/v1/types.go
index 86b05c54..be8913eb 100644
--- a/vendor/k8s.io/api/authorization/v1/types.go
+++ b/vendor/k8s.io/api/authorization/v1/types.go
@@ -24,7 +24,7 @@ import (
 
 // +genclient
 // +genclient:nonNamespaced
-// +genclient:noVerbs
+// +genclient:onlyVerbs=create
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 
 // SubjectAccessReview checks whether or not a user or group can perform an action.
@@ -43,7 +43,7 @@ type SubjectAccessReview struct {
 
 // +genclient
 // +genclient:nonNamespaced
-// +genclient:noVerbs
+// +genclient:onlyVerbs=create
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 
 // SelfSubjectAccessReview checks whether or not the current user can perform an action.  Not filling in a
@@ -63,7 +63,7 @@ type SelfSubjectAccessReview struct {
 }
 
 // +genclient
-// +genclient:noVerbs
+// +genclient:onlyVerbs=create
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 
 // LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace.
@@ -189,7 +189,7 @@ type SubjectAccessReviewStatus struct {
 
 // +genclient
 // +genclient:nonNamespaced
-// +genclient:noVerbs
+// +genclient:onlyVerbs=create
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 
 // SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace.
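Illustrative sketch, not part of the vendored patch: the authorization review types follow the same onlyVerbs=create pattern just switched on above. A minimal SelfSubjectAccessReview asking "can the current user list pods in this namespace?":

package main

import (
	"fmt"

	authzv1 "k8s.io/api/authorization/v1"
)

func main() {
	ssar := &authzv1.SelfSubjectAccessReview{
		Spec: authzv1.SelfSubjectAccessReviewSpec{
			ResourceAttributes: &authzv1.ResourceAttributes{
				Namespace: "default",
				Verb:      "list",
				Resource:  "pods",
			},
		},
	}
	// As with TokenReview, the generated client exposes only Create for this type; the
	// API server fills in Status.Allowed on the returned object.
	fmt.Println(ssar.Spec.ResourceAttributes.Verb)
}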
diff --git a/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go b/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go
index f0def20b..647c0c58 100644
--- a/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go
+++ b/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go
@@ -42,7 +42,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 func (m *ExtraValue) Reset()      { *m = ExtraValue{} }
 func (*ExtraValue) ProtoMessage() {}
@@ -4004,6 +4004,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error {
 func skipGenerated(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
+	depth := 0
 	for iNdEx < l {
 		var wire uint64
 		for shift := uint(0); ; shift += 7 {
@@ -4035,10 +4036,8 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			return iNdEx, nil
 		case 1:
 			iNdEx += 8
-			return iNdEx, nil
 		case 2:
 			var length int
 			for shift := uint(0); ; shift += 7 {
@@ -4059,55 +4058,30 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 				return 0, ErrInvalidLengthGenerated
 			}
 			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthGenerated
-			}
-			return iNdEx, nil
 		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowGenerated
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipGenerated(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthGenerated
-				}
-			}
-			return iNdEx, nil
+			depth++
 		case 4:
-			return iNdEx, nil
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
 		case 5:
 			iNdEx += 4
-			return iNdEx, nil
 		default:
 			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
 		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
 	}
-	panic("unreachable")
+	return 0, io.ErrUnexpectedEOF
 }
 
 var (
-	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
 )
diff --git a/vendor/k8s.io/api/authorization/v1beta1/types.go b/vendor/k8s.io/api/authorization/v1beta1/types.go
index 618ff8c0..cf117d26 100644
--- a/vendor/k8s.io/api/authorization/v1beta1/types.go
+++ b/vendor/k8s.io/api/authorization/v1beta1/types.go
@@ -24,7 +24,7 @@ import (
 
 // +genclient
 // +genclient:nonNamespaced
-// +genclient:noVerbs
+// +genclient:onlyVerbs=create
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 
 // SubjectAccessReview checks whether or not a user or group can perform an action.
@@ -43,7 +43,7 @@ type SubjectAccessReview struct {
 
 // +genclient
 // +genclient:nonNamespaced
-// +genclient:noVerbs
+// +genclient:onlyVerbs=create
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 
 // SelfSubjectAccessReview checks whether or not the current user can perform an action.  Not filling in a
@@ -63,7 +63,7 @@ type SelfSubjectAccessReview struct {
 }
 
 // +genclient
-// +genclient:noVerbs
+// +genclient:onlyVerbs=create
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 
 // LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace.
@@ -189,7 +189,7 @@ type SubjectAccessReviewStatus struct {
 
 // +genclient
 // +genclient:nonNamespaced
-// +genclient:noVerbs
+// +genclient:onlyVerbs=create
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 
 // SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace.
diff --git a/vendor/k8s.io/api/autoscaling/v1/generated.pb.go b/vendor/k8s.io/api/autoscaling/v1/generated.pb.go
index 174e6f5f..1e3d8907 100644
--- a/vendor/k8s.io/api/autoscaling/v1/generated.pb.go
+++ b/vendor/k8s.io/api/autoscaling/v1/generated.pb.go
@@ -45,7 +45,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 func (m *CrossVersionObjectReference) Reset()      { *m = CrossVersionObjectReference{} }
 func (*CrossVersionObjectReference) ProtoMessage() {}
@@ -5487,6 +5487,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error {
 func skipGenerated(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
+	depth := 0
 	for iNdEx < l {
 		var wire uint64
 		for shift := uint(0); ; shift += 7 {
@@ -5518,10 +5519,8 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			return iNdEx, nil
 		case 1:
 			iNdEx += 8
-			return iNdEx, nil
 		case 2:
 			var length int
 			for shift := uint(0); ; shift += 7 {
@@ -5542,55 +5541,30 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 				return 0, ErrInvalidLengthGenerated
 			}
 			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthGenerated
-			}
-			return iNdEx, nil
 		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowGenerated
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipGenerated(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthGenerated
-				}
-			}
-			return iNdEx, nil
+			depth++
 		case 4:
-			return iNdEx, nil
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
 		case 5:
 			iNdEx += 4
-			return iNdEx, nil
 		default:
 			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
 		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
 	}
-	panic("unreachable")
+	return 0, io.ErrUnexpectedEOF
 }
 
 var (
-	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
 )
diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go b/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go
index 0b6ed381..e129e41b 100644
--- a/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go
+++ b/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go
@@ -45,7 +45,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 func (m *CrossVersionObjectReference) Reset()      { *m = CrossVersionObjectReference{} }
 func (*CrossVersionObjectReference) ProtoMessage() {}
@@ -5012,6 +5012,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error {
 func skipGenerated(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
+	depth := 0
 	for iNdEx < l {
 		var wire uint64
 		for shift := uint(0); ; shift += 7 {
@@ -5043,10 +5044,8 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			return iNdEx, nil
 		case 1:
 			iNdEx += 8
-			return iNdEx, nil
 		case 2:
 			var length int
 			for shift := uint(0); ; shift += 7 {
@@ -5067,55 +5066,30 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 				return 0, ErrInvalidLengthGenerated
 			}
 			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthGenerated
-			}
-			return iNdEx, nil
 		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowGenerated
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipGenerated(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthGenerated
-				}
-			}
-			return iNdEx, nil
+			depth++
 		case 4:
-			return iNdEx, nil
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
 		case 5:
 			iNdEx += 4
-			return iNdEx, nil
 		default:
 			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
 		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
 	}
-	panic("unreachable")
+	return 0, io.ErrUnexpectedEOF
 }
 
 var (
-	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
 )
diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go b/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go
index 23bc5b98..c69d6cb9 100644
--- a/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go
+++ b/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go
@@ -45,7 +45,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 func (m *CrossVersionObjectReference) Reset()      { *m = CrossVersionObjectReference{} }
 func (*CrossVersionObjectReference) ProtoMessage() {}
@@ -131,10 +131,66 @@ func (m *ExternalMetricStatus) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_ExternalMetricStatus proto.InternalMessageInfo
 
+func (m *HPAScalingPolicy) Reset()      { *m = HPAScalingPolicy{} }
+func (*HPAScalingPolicy) ProtoMessage() {}
+func (*HPAScalingPolicy) Descriptor() ([]byte, []int) {
+	return fileDescriptor_592ad94d7d6be24f, []int{3}
+}
+func (m *HPAScalingPolicy) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *HPAScalingPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *HPAScalingPolicy) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HPAScalingPolicy.Merge(m, src)
+}
+func (m *HPAScalingPolicy) XXX_Size() int {
+	return m.Size()
+}
+func (m *HPAScalingPolicy) XXX_DiscardUnknown() {
+	xxx_messageInfo_HPAScalingPolicy.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HPAScalingPolicy proto.InternalMessageInfo
+
+func (m *HPAScalingRules) Reset()      { *m = HPAScalingRules{} }
+func (*HPAScalingRules) ProtoMessage() {}
+func (*HPAScalingRules) Descriptor() ([]byte, []int) {
+	return fileDescriptor_592ad94d7d6be24f, []int{4}
+}
+func (m *HPAScalingRules) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *HPAScalingRules) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *HPAScalingRules) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HPAScalingRules.Merge(m, src)
+}
+func (m *HPAScalingRules) XXX_Size() int {
+	return m.Size()
+}
+func (m *HPAScalingRules) XXX_DiscardUnknown() {
+	xxx_messageInfo_HPAScalingRules.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HPAScalingRules proto.InternalMessageInfo
+
 func (m *HorizontalPodAutoscaler) Reset()      { *m = HorizontalPodAutoscaler{} }
 func (*HorizontalPodAutoscaler) ProtoMessage() {}
 func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) {
-	return fileDescriptor_592ad94d7d6be24f, []int{3}
+	return fileDescriptor_592ad94d7d6be24f, []int{5}
 }
 func (m *HorizontalPodAutoscaler) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -159,10 +215,38 @@ func (m *HorizontalPodAutoscaler) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_HorizontalPodAutoscaler proto.InternalMessageInfo
 
+func (m *HorizontalPodAutoscalerBehavior) Reset()      { *m = HorizontalPodAutoscalerBehavior{} }
+func (*HorizontalPodAutoscalerBehavior) ProtoMessage() {}
+func (*HorizontalPodAutoscalerBehavior) Descriptor() ([]byte, []int) {
+	return fileDescriptor_592ad94d7d6be24f, []int{6}
+}
+func (m *HorizontalPodAutoscalerBehavior) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *HorizontalPodAutoscalerBehavior) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *HorizontalPodAutoscalerBehavior) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HorizontalPodAutoscalerBehavior.Merge(m, src)
+}
+func (m *HorizontalPodAutoscalerBehavior) XXX_Size() int {
+	return m.Size()
+}
+func (m *HorizontalPodAutoscalerBehavior) XXX_DiscardUnknown() {
+	xxx_messageInfo_HorizontalPodAutoscalerBehavior.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HorizontalPodAutoscalerBehavior proto.InternalMessageInfo
+
 func (m *HorizontalPodAutoscalerCondition) Reset()      { *m = HorizontalPodAutoscalerCondition{} }
 func (*HorizontalPodAutoscalerCondition) ProtoMessage() {}
 func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) {
-	return fileDescriptor_592ad94d7d6be24f, []int{4}
+	return fileDescriptor_592ad94d7d6be24f, []int{7}
 }
 func (m *HorizontalPodAutoscalerCondition) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -190,7 +274,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerCondition proto.InternalMessageInfo
 func (m *HorizontalPodAutoscalerList) Reset()      { *m = HorizontalPodAutoscalerList{} }
 func (*HorizontalPodAutoscalerList) ProtoMessage() {}
 func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_592ad94d7d6be24f, []int{5}
+	return fileDescriptor_592ad94d7d6be24f, []int{8}
 }
 func (m *HorizontalPodAutoscalerList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -218,7 +302,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerList proto.InternalMessageInfo
 func (m *HorizontalPodAutoscalerSpec) Reset()      { *m = HorizontalPodAutoscalerSpec{} }
 func (*HorizontalPodAutoscalerSpec) ProtoMessage() {}
 func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) {
-	return fileDescriptor_592ad94d7d6be24f, []int{6}
+	return fileDescriptor_592ad94d7d6be24f, []int{9}
 }
 func (m *HorizontalPodAutoscalerSpec) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -246,7 +330,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerSpec proto.InternalMessageInfo
 func (m *HorizontalPodAutoscalerStatus) Reset()      { *m = HorizontalPodAutoscalerStatus{} }
 func (*HorizontalPodAutoscalerStatus) ProtoMessage() {}
 func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_592ad94d7d6be24f, []int{7}
+	return fileDescriptor_592ad94d7d6be24f, []int{10}
 }
 func (m *HorizontalPodAutoscalerStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -274,7 +358,7 @@ var xxx_messageInfo_HorizontalPodAutoscalerStatus proto.InternalMessageInfo
 func (m *MetricIdentifier) Reset()      { *m = MetricIdentifier{} }
 func (*MetricIdentifier) ProtoMessage() {}
 func (*MetricIdentifier) Descriptor() ([]byte, []int) {
-	return fileDescriptor_592ad94d7d6be24f, []int{8}
+	return fileDescriptor_592ad94d7d6be24f, []int{11}
 }
 func (m *MetricIdentifier) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -302,7 +386,7 @@ var xxx_messageInfo_MetricIdentifier proto.InternalMessageInfo
 func (m *MetricSpec) Reset()      { *m = MetricSpec{} }
 func (*MetricSpec) ProtoMessage() {}
 func (*MetricSpec) Descriptor() ([]byte, []int) {
-	return fileDescriptor_592ad94d7d6be24f, []int{9}
+	return fileDescriptor_592ad94d7d6be24f, []int{12}
 }
 func (m *MetricSpec) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -330,7 +414,7 @@ var xxx_messageInfo_MetricSpec proto.InternalMessageInfo
 func (m *MetricStatus) Reset()      { *m = MetricStatus{} }
 func (*MetricStatus) ProtoMessage() {}
 func (*MetricStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_592ad94d7d6be24f, []int{10}
+	return fileDescriptor_592ad94d7d6be24f, []int{13}
 }
 func (m *MetricStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -358,7 +442,7 @@ var xxx_messageInfo_MetricStatus proto.InternalMessageInfo
 func (m *MetricTarget) Reset()      { *m = MetricTarget{} }
 func (*MetricTarget) ProtoMessage() {}
 func (*MetricTarget) Descriptor() ([]byte, []int) {
-	return fileDescriptor_592ad94d7d6be24f, []int{11}
+	return fileDescriptor_592ad94d7d6be24f, []int{14}
 }
 func (m *MetricTarget) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -386,7 +470,7 @@ var xxx_messageInfo_MetricTarget proto.InternalMessageInfo
 func (m *MetricValueStatus) Reset()      { *m = MetricValueStatus{} }
 func (*MetricValueStatus) ProtoMessage() {}
 func (*MetricValueStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_592ad94d7d6be24f, []int{12}
+	return fileDescriptor_592ad94d7d6be24f, []int{15}
 }
 func (m *MetricValueStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -414,7 +498,7 @@ var xxx_messageInfo_MetricValueStatus proto.InternalMessageInfo
 func (m *ObjectMetricSource) Reset()      { *m = ObjectMetricSource{} }
 func (*ObjectMetricSource) ProtoMessage() {}
 func (*ObjectMetricSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_592ad94d7d6be24f, []int{13}
+	return fileDescriptor_592ad94d7d6be24f, []int{16}
 }
 func (m *ObjectMetricSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -442,7 +526,7 @@ var xxx_messageInfo_ObjectMetricSource proto.InternalMessageInfo
 func (m *ObjectMetricStatus) Reset()      { *m = ObjectMetricStatus{} }
 func (*ObjectMetricStatus) ProtoMessage() {}
 func (*ObjectMetricStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_592ad94d7d6be24f, []int{14}
+	return fileDescriptor_592ad94d7d6be24f, []int{17}
 }
 func (m *ObjectMetricStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -470,7 +554,7 @@ var xxx_messageInfo_ObjectMetricStatus proto.InternalMessageInfo
 func (m *PodsMetricSource) Reset()      { *m = PodsMetricSource{} }
 func (*PodsMetricSource) ProtoMessage() {}
 func (*PodsMetricSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_592ad94d7d6be24f, []int{15}
+	return fileDescriptor_592ad94d7d6be24f, []int{18}
 }
 func (m *PodsMetricSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -498,7 +582,7 @@ var xxx_messageInfo_PodsMetricSource proto.InternalMessageInfo
 func (m *PodsMetricStatus) Reset()      { *m = PodsMetricStatus{} }
 func (*PodsMetricStatus) ProtoMessage() {}
 func (*PodsMetricStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_592ad94d7d6be24f, []int{16}
+	return fileDescriptor_592ad94d7d6be24f, []int{19}
 }
 func (m *PodsMetricStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -526,7 +610,7 @@ var xxx_messageInfo_PodsMetricStatus proto.InternalMessageInfo
 func (m *ResourceMetricSource) Reset()      { *m = ResourceMetricSource{} }
 func (*ResourceMetricSource) ProtoMessage() {}
 func (*ResourceMetricSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_592ad94d7d6be24f, []int{17}
+	return fileDescriptor_592ad94d7d6be24f, []int{20}
 }
 func (m *ResourceMetricSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -554,7 +638,7 @@ var xxx_messageInfo_ResourceMetricSource proto.InternalMessageInfo
 func (m *ResourceMetricStatus) Reset()      { *m = ResourceMetricStatus{} }
 func (*ResourceMetricStatus) ProtoMessage() {}
 func (*ResourceMetricStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_592ad94d7d6be24f, []int{18}
+	return fileDescriptor_592ad94d7d6be24f, []int{21}
 }
 func (m *ResourceMetricStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -583,7 +667,10 @@ func init() {
 	proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.api.autoscaling.v2beta2.CrossVersionObjectReference")
 	proto.RegisterType((*ExternalMetricSource)(nil), "k8s.io.api.autoscaling.v2beta2.ExternalMetricSource")
 	proto.RegisterType((*ExternalMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta2.ExternalMetricStatus")
+	proto.RegisterType((*HPAScalingPolicy)(nil), "k8s.io.api.autoscaling.v2beta2.HPAScalingPolicy")
+	proto.RegisterType((*HPAScalingRules)(nil), "k8s.io.api.autoscaling.v2beta2.HPAScalingRules")
 	proto.RegisterType((*HorizontalPodAutoscaler)(nil), "k8s.io.api.autoscaling.v2beta2.HorizontalPodAutoscaler")
+	proto.RegisterType((*HorizontalPodAutoscalerBehavior)(nil), "k8s.io.api.autoscaling.v2beta2.HorizontalPodAutoscalerBehavior")
 	proto.RegisterType((*HorizontalPodAutoscalerCondition)(nil), "k8s.io.api.autoscaling.v2beta2.HorizontalPodAutoscalerCondition")
 	proto.RegisterType((*HorizontalPodAutoscalerList)(nil), "k8s.io.api.autoscaling.v2beta2.HorizontalPodAutoscalerList")
 	proto.RegisterType((*HorizontalPodAutoscalerSpec)(nil), "k8s.io.api.autoscaling.v2beta2.HorizontalPodAutoscalerSpec")
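Illustrative sketch, not part of the vendored patch: the hunks above register the new v2beta2 scaling-behavior types (HPAScalingPolicy, HPAScalingRules, HorizontalPodAutoscalerBehavior). The fragment below shows how they compose; the field names follow the upstream autoscaling/v2beta2 Go types and are an assumption here, since the corresponding types.go hunk is outside this excerpt.

package main

import (
	"fmt"

	autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
)

func main() {
	window := int32(300)
	behavior := &autoscalingv2beta2.HorizontalPodAutoscalerBehavior{
		ScaleDown: &autoscalingv2beta2.HPAScalingRules{
			StabilizationWindowSeconds: &window, // wait 5 minutes before scaling down
			Policies: []autoscalingv2beta2.HPAScalingPolicy{
				{Type: "Pods", Value: 2, PeriodSeconds: 60}, // remove at most 2 pods per minute
			},
		},
	}
	fmt.Println(len(behavior.ScaleDown.Policies))
}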
@@ -606,97 +693,111 @@ func init() {
 }
 
 var fileDescriptor_592ad94d7d6be24f = []byte{
-	// 1425 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x58, 0xdd, 0x6f, 0x1b, 0xc5,
-	0x16, 0xcf, 0xda, 0x8e, 0x93, 0x8e, 0xd3, 0x24, 0x9d, 0x5b, 0xb5, 0x56, 0xaa, 0x6b, 0x47, 0xab,
-	0xab, 0xab, 0x52, 0xd1, 0x35, 0x31, 0xe1, 0x43, 0x42, 0x48, 0xc4, 0x01, 0xda, 0x8a, 0xa4, 0x2d,
-	0x93, 0xb4, 0x42, 0xa8, 0x45, 0x8c, 0x77, 0x4f, 0xdc, 0x21, 0xde, 0x5d, 0x6b, 0x76, 0x6c, 0x35,
-	0x45, 0x42, 0xbc, 0xf0, 0x8e, 0x40, 0xfc, 0x13, 0x88, 0x17, 0x5e, 0x90, 0x78, 0xe4, 0x43, 0xa8,
-	0x42, 0x08, 0xf5, 0xb1, 0x08, 0xc9, 0xa2, 0xe6, 0xbf, 0xe8, 0x13, 0xda, 0x99, 0xd9, 0xf5, 0xae,
-	0xed, 0xc4, 0x4e, 0x95, 0x14, 0xf5, 0xcd, 0x33, 0xe7, 0x9c, 0xdf, 0xf9, 0x9c, 0x73, 0xce, 0x1a,
-	0x5d, 0xda, 0x7d, 0x35, 0xb0, 0x98, 0x5f, 0xd9, 0x6d, 0xd7, 0x81, 0x7b, 0x20, 0x20, 0xa8, 0x74,
-	0xc0, 0x73, 0x7c, 0x5e, 0xd1, 0x04, 0xda, 0x62, 0x15, 0xda, 0x16, 0x7e, 0x60, 0xd3, 0x26, 0xf3,
-	0x1a, 0x95, 0x4e, 0xb5, 0x0e, 0x82, 0x56, 0x2b, 0x0d, 0xf0, 0x80, 0x53, 0x01, 0x8e, 0xd5, 0xe2,
-	0xbe, 0xf0, 0x71, 0x49, 0xf1, 0x5b, 0xb4, 0xc5, 0xac, 0x04, 0xbf, 0xa5, 0xf9, 0x97, 0x2e, 0x36,
-	0x98, 0xb8, 0xd3, 0xae, 0x5b, 0xb6, 0xef, 0x56, 0x1a, 0x7e, 0xc3, 0xaf, 0x48, 0xb1, 0x7a, 0x7b,
-	0x47, 0x9e, 0xe4, 0x41, 0xfe, 0x52, 0x70, 0x4b, 0x66, 0x42, 0xbd, 0xed, 0x73, 0xa8, 0x74, 0x56,
-	0x06, 0x55, 0x2e, 0xad, 0xf6, 0x79, 0x5c, 0x6a, 0xdf, 0x61, 0x1e, 0xf0, 0xbd, 0x4a, 0x6b, 0xb7,
-	0x21, 0x85, 0x38, 0x04, 0x7e, 0x9b, 0xdb, 0x70, 0x28, 0xa9, 0xa0, 0xe2, 0x82, 0xa0, 0xa3, 0x74,
-	0x55, 0xf6, 0x93, 0xe2, 0x6d, 0x4f, 0x30, 0x77, 0x58, 0xcd, 0xcb, 0xe3, 0x04, 0x02, 0xfb, 0x0e,
-	0xb8, 0x74, 0x50, 0xce, 0xfc, 0xca, 0x40, 0xe7, 0xd6, 0xb9, 0x1f, 0x04, 0x37, 0x81, 0x07, 0xcc,
-	0xf7, 0xae, 0xd5, 0x3f, 0x02, 0x5b, 0x10, 0xd8, 0x01, 0x0e, 0x9e, 0x0d, 0x78, 0x19, 0xe5, 0x76,
-	0x99, 0xe7, 0x14, 0x8d, 0x65, 0xe3, 0xfc, 0x89, 0xda, 0xdc, 0xfd, 0x6e, 0x79, 0xaa, 0xd7, 0x2d,
-	0xe7, 0xde, 0x61, 0x9e, 0x43, 0x24, 0x25, 0xe4, 0xf0, 0xa8, 0x0b, 0xc5, 0x4c, 0x9a, 0xe3, 0x2a,
-	0x75, 0x81, 0x48, 0x0a, 0xae, 0x22, 0x44, 0x5b, 0x4c, 0x2b, 0x28, 0x66, 0x25, 0x1f, 0xd6, 0x7c,
-	0x68, 0xed, 0xfa, 0x15, 0x4d, 0x21, 0x09, 0x2e, 0xf3, 0x17, 0x03, 0x9d, 0x7e, 0xeb, 0xae, 0x00,
-	0xee, 0xd1, 0xe6, 0x26, 0x08, 0xce, 0xec, 0x2d, 0x19, 0x5f, 0xfc, 0x1e, 0xca, 0xbb, 0xf2, 0x2c,
-	0x4d, 0x2a, 0x54, 0x5f, 0xb0, 0x0e, 0xae, 0x04, 0x4b, 0x49, 0x5f, 0x71, 0xc0, 0x13, 0x6c, 0x87,
-	0x01, 0xaf, 0xcd, 0x6b, 0xd5, 0x79, 0x45, 0x21, 0x1a, 0x0f, 0x6f, 0xa3, 0xbc, 0xa0, 0xbc, 0x01,
-	0x42, 0xba, 0x52, 0xa8, 0x3e, 0x3f, 0x19, 0xf2, 0xb6, 0x94, 0xe9, 0xa3, 0xaa, 0x33, 0xd1, 0x58,
-	0xe6, 0xef, 0xc3, 0x8e, 0x08, 0x2a, 0xda, 0xc1, 0x31, 0x3a, 0x72, 0x0b, 0xcd, 0xd8, 0x6d, 0xce,
-	0xc1, 0x8b, 0x3c, 0x59, 0x99, 0x0c, 0xfa, 0x26, 0x6d, 0xb6, 0x41, 0x59, 0x57, 0x5b, 0xd0, 0xd8,
-	0x33, 0xeb, 0x0a, 0x89, 0x44, 0x90, 0xe6, 0x0f, 0x19, 0x74, 0xf6, 0xb2, 0xcf, 0xd9, 0x3d, 0xdf,
-	0x13, 0xb4, 0x79, 0xdd, 0x77, 0xd6, 0x34, 0x20, 0x70, 0xfc, 0x21, 0x9a, 0x0d, 0x2b, 0xda, 0xa1,
-	0x82, 0x8e, 0xf0, 0x2a, 0x2e, 0x4c, 0xab, 0xb5, 0xdb, 0x08, 0x2f, 0x02, 0x2b, 0xe4, 0xb6, 0x3a,
-	0x2b, 0x96, 0x2a, 0xbb, 0x4d, 0x10, 0xb4, 0x5f, 0x19, 0xfd, 0x3b, 0x12, 0xa3, 0xe2, 0xdb, 0x28,
-	0x17, 0xb4, 0xc0, 0xd6, 0x8e, 0xbd, 0x36, 0xce, 0xb1, 0x7d, 0x0c, 0xdd, 0x6a, 0x81, 0xdd, 0x2f,
-	0xd5, 0xf0, 0x44, 0x24, 0x2c, 0x06, 0x94, 0x0f, 0x64, 0x00, 0x64, 0x99, 0x16, 0xaa, 0xaf, 0x3f,
-	0xa9, 0x02, 0x15, 0xc5, 0x38, 0x43, 0xea, 0x4c, 0x34, 0xb8, 0xf9, 0x59, 0x16, 0x2d, 0xef, 0x23,
-	0xb9, 0xee, 0x7b, 0x0e, 0x13, 0xcc, 0xf7, 0xf0, 0x65, 0x94, 0x13, 0x7b, 0x2d, 0xd0, 0x4f, 0x6f,
-	0x35, 0xb2, 0x76, 0x7b, 0xaf, 0x05, 0x8f, 0xbb, 0xe5, 0xff, 0x8d, 0x93, 0x0f, 0xf9, 0x88, 0x44,
-	0xc0, 0x1b, 0xb1, 0x57, 0x99, 0x14, 0x96, 0x36, 0xeb, 0x71, 0xb7, 0x3c, 0xa2, 0xff, 0x59, 0x31,
-	0x52, 0xda, 0x78, 0xdc, 0x41, 0xb8, 0x49, 0x03, 0xb1, 0xcd, 0xa9, 0x17, 0x28, 0x4d, 0xcc, 0x05,
-	0x1d, 0xaf, 0x0b, 0x93, 0xa5, 0x3b, 0x94, 0xa8, 0x2d, 0x69, 0x2b, 0xf0, 0xc6, 0x10, 0x1a, 0x19,
-	0xa1, 0x01, 0xff, 0x1f, 0xe5, 0x39, 0xd0, 0xc0, 0xf7, 0x8a, 0x39, 0xe9, 0x45, 0x1c, 0x5c, 0x22,
-	0x6f, 0x89, 0xa6, 0xe2, 0xe7, 0xd0, 0x8c, 0x0b, 0x41, 0x40, 0x1b, 0x50, 0x9c, 0x96, 0x8c, 0x71,
-	0x2d, 0x6f, 0xaa, 0x6b, 0x12, 0xd1, 0xcd, 0x3f, 0x0c, 0x74, 0x6e, 0x9f, 0x38, 0x6e, 0xb0, 0x40,
-	0xe0, 0x5b, 0x43, 0xf5, 0x6c, 0x4d, 0xe6, 0x60, 0x28, 0x2d, 0xab, 0x79, 0x51, 0xeb, 0x9e, 0x8d,
-	0x6e, 0x12, 0xb5, 0x7c, 0x0b, 0x4d, 0x33, 0x01, 0x6e, 0x98, 0x95, 0xec, 0xf9, 0x42, 0xf5, 0x95,
-	0x27, 0xac, 0xb5, 0xda, 0x49, 0xad, 0x63, 0xfa, 0x4a, 0x88, 0x46, 0x14, 0xa8, 0xf9, 0x67, 0x66,
-	0x5f, 0xdf, 0xc2, 0x82, 0xc7, 0x1f, 0xa3, 0x79, 0x79, 0xd2, 0xfd, 0x0a, 0x76, 0xb4, 0x87, 0x63,
-	0xdf, 0xd4, 0x01, 0xe3, 0xa2, 0x76, 0x46, 0x9b, 0x32, 0xbf, 0x95, 0x82, 0x26, 0x03, 0xaa, 0xf0,
-	0x0a, 0x2a, 0xb8, 0xcc, 0x23, 0xd0, 0x6a, 0x32, 0x9b, 0xaa, 0xb2, 0x9c, 0xae, 0x2d, 0xf4, 0xba,
-	0xe5, 0xc2, 0x66, 0xff, 0x9a, 0x24, 0x79, 0xf0, 0x4b, 0xa8, 0xe0, 0xd2, 0xbb, 0xb1, 0x48, 0x56,
-	0x8a, 0xfc, 0x47, 0xeb, 0x2b, 0x6c, 0xf6, 0x49, 0x24, 0xc9, 0x87, 0x6f, 0x84, 0xd5, 0x10, 0x76,
-	0xb7, 0xa0, 0x98, 0x93, 0x61, 0xbe, 0x30, 0x59, 0x33, 0x94, 0x2d, 0x22, 0x51, 0x39, 0x12, 0x82,
-	0x44, 0x58, 0xe6, 0x77, 0x39, 0xf4, 0xdf, 0x03, 0xdf, 0x3e, 0x7e, 0x1b, 0x61, 0xbf, 0x1e, 0x00,
-	0xef, 0x80, 0x73, 0x49, 0x0d, 0xdd, 0x70, 0xfa, 0x85, 0x31, 0xce, 0xd6, 0xce, 0x84, 0x65, 0x7f,
-	0x6d, 0x88, 0x4a, 0x46, 0x48, 0x60, 0x1b, 0x9d, 0x0c, 0x1f, 0x83, 0x0a, 0x28, 0xd3, 0x83, 0xf6,
-	0x70, 0x2f, 0xed, 0x54, 0xaf, 0x5b, 0x3e, 0xb9, 0x91, 0x04, 0x21, 0x69, 0x4c, 0xbc, 0x86, 0x16,
-	0x74, 0x7f, 0x1f, 0x08, 0xf0, 0x59, 0x1d, 0x81, 0x85, 0xf5, 0x34, 0x99, 0x0c, 0xf2, 0x87, 0x10,
-	0x0e, 0x04, 0x8c, 0x83, 0x13, 0x43, 0xe4, 0xd2, 0x10, 0x6f, 0xa6, 0xc9, 0x64, 0x90, 0x1f, 0x37,
-	0xd1, 0xbc, 0x46, 0xd5, 0xf1, 0x2e, 0x4e, 0xcb, 0x94, 0x4d, 0x38, 0x89, 0x75, 0xd3, 0x8d, 0x6b,
-	0x70, 0x3d, 0x85, 0x45, 0x06, 0xb0, 0xb1, 0x40, 0xc8, 0x8e, 0x5a, 0x5c, 0x50, 0xcc, 0x4b, 0x4d,
-	0x6f, 0x3c, 0xe1, 0x1b, 0x8c, 0x7b, 0x65, 0x7f, 0x7c, 0xc5, 0x57, 0x01, 0x49, 0xe8, 0x31, 0xbf,
-	0x34, 0xd0, 0xe2, 0xe0, 0x24, 0x8f, 0x77, 0x28, 0x63, 0xdf, 0x1d, 0xea, 0x36, 0x9a, 0x0d, 0xa0,
-	0x09, 0xb6, 0xf0, 0xb9, 0x2e, 0x80, 0x17, 0x27, 0xec, 0x44, 0xb4, 0x0e, 0xcd, 0x2d, 0x2d, 0x5a,
-	0x9b, 0x0b, 0x5b, 0x51, 0x74, 0x22, 0x31, 0xa4, 0xf9, 0x75, 0x16, 0xa1, 0x7e, 0xdd, 0xe3, 0xd5,
-	0xd4, 0xe8, 0x59, 0x1e, 0x18, 0x3d, 0x8b, 0xc9, 0x85, 0x2c, 0x31, 0x66, 0x6e, 0xa2, 0xbc, 0x2f,
-	0xfb, 0x81, 0xb6, 0xb0, 0x3a, 0x2e, 0x98, 0xf1, 0x84, 0x8f, 0xd1, 0x6a, 0x28, 0x6c, 0xe8, 0xba,
-	0xab, 0x68, 0x34, 0x7c, 0x15, 0xe5, 0x5a, 0xbe, 0x13, 0x8d, 0xe4, 0xb1, 0x7b, 0xd2, 0x75, 0xdf,
-	0x09, 0x52, 0x98, 0xb3, 0xa1, 0xed, 0xe1, 0x2d, 0x91, 0x38, 0xf8, 0x03, 0x34, 0x1b, 0xad, 0xeb,
-	0xb2, 0x44, 0x0b, 0xd5, 0xd5, 0x71, 0x98, 0x44, 0xf3, 0xa7, 0x70, 0x65, 0x30, 0x23, 0x0a, 0x89,
-	0x31, 0x43, 0x7c, 0xd0, 0x1b, 0x9f, 0x9c, 0x40, 0x13, 0xe0, 0x8f, 0x5a, 0x75, 0x15, 0x7e, 0x44,
-	0x21, 0x31, 0xa6, 0xf9, 0x4d, 0x16, 0xcd, 0xa5, 0x56, 0xc9, 0x7f, 0x23, 0x5d, 0xea, 0xad, 0x1d,
-	0x6d, 0xba, 0x14, 0xe6, 0xd1, 0xa7, 0x4b, 0xe1, 0x1e, 0x5f, 0xba, 0x12, 0xf8, 0x23, 0xd2, 0xf5,
-	0x53, 0x26, 0x4a, 0x97, 0x9a, 0x7f, 0x93, 0xa5, 0x4b, 0xf1, 0x26, 0xd2, 0x75, 0x0d, 0x4d, 0x77,
-	0xc2, 0x05, 0x5d, 0x67, 0xeb, 0xc0, 0x45, 0xc4, 0x8a, 0x9c, 0xb3, 0xde, 0x6d, 0x53, 0x4f, 0x30,
-	0xb1, 0x57, 0x3b, 0x11, 0x2e, 0x08, 0x72, 0xc3, 0x27, 0x0a, 0x07, 0x3b, 0x68, 0x8e, 0x76, 0x80,
-	0xd3, 0x06, 0xc8, 0x6b, 0x9d, 0xaf, 0xc3, 0xe2, 0x2e, 0xf6, 0xba, 0xe5, 0xb9, 0xb5, 0x04, 0x0e,
-	0x49, 0xa1, 0x86, 0x63, 0x50, 0x9f, 0x6f, 0x08, 0xd6, 0x64, 0xf7, 0xd4, 0x18, 0x54, 0x93, 0x41,
-	0x8e, 0xc1, 0xb5, 0x21, 0x2a, 0x19, 0x21, 0x61, 0x7e, 0x91, 0x41, 0xa7, 0x86, 0x3e, 0x53, 0xfa,
-	0x41, 0x31, 0x8e, 0x29, 0x28, 0x99, 0xa7, 0x18, 0x94, 0xec, 0xa1, 0x83, 0xf2, 0x73, 0x06, 0xe1,
-	0xe1, 0x26, 0x8a, 0x3f, 0x91, 0xa3, 0xd8, 0xe6, 0xac, 0x0e, 0x8e, 0x22, 0x1f, 0xc5, 0x6e, 0x97,
-	0x9c, 0xe3, 0x49, 0x6c, 0x32, 0xa8, 0xec, 0x78, 0xbe, 0xa4, 0x13, 0x1f, 0xcc, 0xd9, 0xa3, 0xfd,
-	0x60, 0x36, 0x7f, 0x1b, 0x0c, 0xe3, 0x33, 0xfd, 0x85, 0x3e, 0x2a, 0xfd, 0xd9, 0xa7, 0x98, 0x7e,
-	0xf3, 0x47, 0x03, 0x2d, 0x0e, 0x0e, 0xe1, 0x67, 0xee, 0x7f, 0x9b, 0x5f, 0xd3, 0x4e, 0x3c, 0xdb,
-	0xff, 0xd9, 0x7c, 0x6b, 0xa0, 0xd3, 0xa3, 0x56, 0x18, 0xbc, 0x9e, 0x5a, 0x3c, 0x2b, 0xc9, 0xc5,
-	0xf3, 0x71, 0xb7, 0x5c, 0x1e, 0xf1, 0xaf, 0x40, 0x04, 0x93, 0xd8, 0x4d, 0x8f, 0x27, 0x01, 0xdf,
-	0x0f, 0xdb, 0xac, 0x92, 0x70, 0x24, 0x36, 0x1f, 0x6b, 0xbc, 0x6b, 0x17, 0xef, 0x3f, 0x2a, 0x4d,
-	0x3d, 0x78, 0x54, 0x9a, 0x7a, 0xf8, 0xa8, 0x34, 0xf5, 0x69, 0xaf, 0x64, 0xdc, 0xef, 0x95, 0x8c,
-	0x07, 0xbd, 0x92, 0xf1, 0xb0, 0x57, 0x32, 0xfe, 0xea, 0x95, 0x8c, 0xcf, 0xff, 0x2e, 0x4d, 0xbd,
-	0x3f, 0xa3, 0xa1, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0x7e, 0xa0, 0xce, 0xf5, 0x16, 0x17, 0x00,
-	0x00,
+	// 1657 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x58, 0xdb, 0x6f, 0x1b, 0x45,
+	0x17, 0xcf, 0xda, 0xce, 0x6d, 0x9c, 0x5b, 0xa7, 0xfd, 0x5a, 0x2b, 0xd5, 0x67, 0x47, 0xfb, 0x55,
+	0x1f, 0x50, 0xd1, 0x35, 0x31, 0x01, 0x2a, 0x55, 0x08, 0xe2, 0x14, 0xda, 0xaa, 0x49, 0x1b, 0xc6,
+	0x69, 0x40, 0x28, 0xad, 0x18, 0xef, 0x4e, 0x9c, 0x21, 0xf6, 0xae, 0xb5, 0xb3, 0x76, 0x9b, 0x22,
+	0x21, 0x5e, 0x78, 0x47, 0x20, 0x5e, 0xf9, 0x03, 0x10, 0x42, 0xe2, 0x05, 0x89, 0x47, 0x2e, 0xaa,
+	0x2a, 0x84, 0x50, 0xdf, 0x28, 0x2f, 0x16, 0x35, 0xff, 0x45, 0x9e, 0xd0, 0x5c, 0x76, 0xbd, 0xbb,
+	0x76, 0x62, 0x27, 0x4a, 0x8a, 0xfa, 0xb6, 0x33, 0xe7, 0x9c, 0xdf, 0x99, 0x39, 0xf7, 0x59, 0x70,
+	0x65, 0xfb, 0x22, 0x33, 0xa8, 0x93, 0xdf, 0x6e, 0x94, 0x89, 0x6b, 0x13, 0x8f, 0xb0, 0x7c, 0x93,
+	0xd8, 0x96, 0xe3, 0xe6, 0x15, 0x01, 0xd7, 0x69, 0x1e, 0x37, 0x3c, 0x87, 0x99, 0xb8, 0x4a, 0xed,
+	0x4a, 0xbe, 0x59, 0x28, 0x13, 0x0f, 0x17, 0xf2, 0x15, 0x62, 0x13, 0x17, 0x7b, 0xc4, 0x32, 0xea,
+	0xae, 0xe3, 0x39, 0x30, 0x2b, 0xf9, 0x0d, 0x5c, 0xa7, 0x46, 0x88, 0xdf, 0x50, 0xfc, 0xb3, 0x17,
+	0x2a, 0xd4, 0xdb, 0x6a, 0x94, 0x0d, 0xd3, 0xa9, 0xe5, 0x2b, 0x4e, 0xc5, 0xc9, 0x0b, 0xb1, 0x72,
+	0x63, 0x53, 0xac, 0xc4, 0x42, 0x7c, 0x49, 0xb8, 0x59, 0x3d, 0xa4, 0xde, 0x74, 0x5c, 0x92, 0x6f,
+	0xce, 0xc7, 0x55, 0xce, 0x2e, 0x74, 0x78, 0x6a, 0xd8, 0xdc, 0xa2, 0x36, 0x71, 0x77, 0xf2, 0xf5,
+	0xed, 0x8a, 0x10, 0x72, 0x09, 0x73, 0x1a, 0xae, 0x49, 0x0e, 0x24, 0xc5, 0xf2, 0x35, 0xe2, 0xe1,
+	0x5e, 0xba, 0xf2, 0x7b, 0x49, 0xb9, 0x0d, 0xdb, 0xa3, 0xb5, 0x6e, 0x35, 0xaf, 0xf6, 0x13, 0x60,
+	0xe6, 0x16, 0xa9, 0xe1, 0xb8, 0x9c, 0xfe, 0xa5, 0x06, 0xce, 0x2e, 0xb9, 0x0e, 0x63, 0xeb, 0xc4,
+	0x65, 0xd4, 0xb1, 0x6f, 0x96, 0x3f, 0x24, 0xa6, 0x87, 0xc8, 0x26, 0x71, 0x89, 0x6d, 0x12, 0x38,
+	0x07, 0x52, 0xdb, 0xd4, 0xb6, 0x32, 0xda, 0x9c, 0xf6, 0xfc, 0x78, 0x71, 0xe2, 0x61, 0x2b, 0x37,
+	0xd4, 0x6e, 0xe5, 0x52, 0xd7, 0xa9, 0x6d, 0x21, 0x41, 0xe1, 0x1c, 0x36, 0xae, 0x91, 0x4c, 0x22,
+	0xca, 0x71, 0x03, 0xd7, 0x08, 0x12, 0x14, 0x58, 0x00, 0x00, 0xd7, 0xa9, 0x52, 0x90, 0x49, 0x0a,
+	0x3e, 0xa8, 0xf8, 0xc0, 0xe2, 0xea, 0x35, 0x45, 0x41, 0x21, 0x2e, 0xfd, 0x81, 0x06, 0x4e, 0xbd,
+	0x75, 0xcf, 0x23, 0xae, 0x8d, 0xab, 0x2b, 0xc4, 0x73, 0xa9, 0x59, 0x12, 0xf6, 0x85, 0xef, 0x81,
+	0x91, 0x9a, 0x58, 0x8b, 0x23, 0xa5, 0x0b, 0x2f, 0x19, 0xfb, 0x47, 0x82, 0x21, 0xa5, 0xaf, 0x59,
+	0xc4, 0xf6, 0xe8, 0x26, 0x25, 0x6e, 0x71, 0x4a, 0xa9, 0x1e, 0x91, 0x14, 0xa4, 0xf0, 0xe0, 0x1a,
+	0x18, 0xf1, 0xb0, 0x5b, 0x21, 0x9e, 0xb8, 0x4a, 0xba, 0xf0, 0xe2, 0x60, 0xc8, 0x6b, 0x42, 0xa6,
+	0x83, 0x2a, 0xd7, 0x48, 0x61, 0xe9, 0xbf, 0x77, 0x5f, 0xc4, 0xc3, 0x5e, 0x83, 0x1d, 0xe3, 0x45,
+	0x36, 0xc0, 0xa8, 0xd9, 0x70, 0x5d, 0x62, 0xfb, 0x37, 0x99, 0x1f, 0x0c, 0x7a, 0x1d, 0x57, 0x1b,
+	0x44, 0x9e, 0xae, 0x38, 0xad, 0xb0, 0x47, 0x97, 0x24, 0x12, 0xf2, 0x21, 0xf5, 0x6f, 0x35, 0x30,
+	0x73, 0x75, 0x75, 0xb1, 0x24, 0x21, 0x56, 0x9d, 0x2a, 0x35, 0x77, 0xe0, 0x45, 0x90, 0xf2, 0x76,
+	0xea, 0x44, 0x85, 0xc9, 0x39, 0x3f, 0x08, 0xd6, 0x76, 0xea, 0x64, 0xb7, 0x95, 0x3b, 0x15, 0xe7,
+	0xe7, 0xfb, 0x48, 0x48, 0xc0, 0xff, 0x81, 0xe1, 0x26, 0xd7, 0x2b, 0x8e, 0x3a, 0x5c, 0x9c, 0x54,
+	0xa2, 0xc3, 0xe2, 0x30, 0x48, 0xd2, 0xe0, 0x25, 0x30, 0x59, 0x27, 0x2e, 0x75, 0xac, 0x12, 0x31,
+	0x1d, 0xdb, 0x62, 0x22, 0x88, 0x86, 0x8b, 0xff, 0x51, 0xcc, 0x93, 0xab, 0x61, 0x22, 0x8a, 0xf2,
+	0xea, 0x5f, 0x25, 0xc0, 0x74, 0xe7, 0x00, 0xa8, 0x51, 0x25, 0x0c, 0xde, 0x01, 0xb3, 0xcc, 0xc3,
+	0x65, 0x5a, 0xa5, 0xf7, 0xb1, 0x47, 0x1d, 0xfb, 0x5d, 0x6a, 0x5b, 0xce, 0xdd, 0x28, 0x7a, 0xb6,
+	0xdd, 0xca, 0xcd, 0x96, 0xf6, 0xe4, 0x42, 0xfb, 0x20, 0xc0, 0xeb, 0x60, 0x82, 0x91, 0x2a, 0x31,
+	0x3d, 0x79, 0x5f, 0x65, 0x97, 0xe7, 0xda, 0xad, 0xdc, 0x44, 0x29, 0xb4, 0xbf, 0xdb, 0xca, 0x9d,
+	0x8c, 0x18, 0x46, 0x12, 0x51, 0x44, 0x18, 0xde, 0x01, 0x63, 0x75, 0xfe, 0x45, 0x09, 0xcb, 0x24,
+	0xe6, 0x92, 0x83, 0xc4, 0x4a, 0xdc, 0xe0, 0xc5, 0x19, 0x65, 0xaa, 0xb1, 0x55, 0x85, 0x84, 0x02,
+	0x4c, 0xfd, 0xc7, 0x04, 0x38, 0x73, 0xd5, 0x71, 0xe9, 0x7d, 0xc7, 0xf6, 0x70, 0x75, 0xd5, 0xb1,
+	0x16, 0x15, 0x22, 0x71, 0xe1, 0x07, 0x60, 0x8c, 0xd7, 0x28, 0x0b, 0x7b, 0xb8, 0x47, 0x9c, 0x06,
+	0xa5, 0xc6, 0xa8, 0x6f, 0x57, 0xf8, 0x06, 0x33, 0x38, 0xb7, 0xd1, 0x9c, 0x37, 0x64, 0x21, 0x59,
+	0x21, 0x1e, 0xee, 0xe4, 0x7a, 0x67, 0x0f, 0x05, 0xa8, 0xf0, 0x36, 0x48, 0xb1, 0x3a, 0x31, 0x55,
+	0xa8, 0x5e, 0xea, 0x7b, 0xb3, 0xde, 0x07, 0x2d, 0xd5, 0x89, 0xd9, 0x29, 0x3e, 0x7c, 0x85, 0x04,
+	0x2c, 0x24, 0x60, 0x84, 0x89, 0x90, 0x16, 0x5e, 0x4d, 0x17, 0x5e, 0x3f, 0xac, 0x02, 0x99, 0x17,
+	0x41, 0xce, 0xc9, 0x35, 0x52, 0xe0, 0xfa, 0x1f, 0x1a, 0xc8, 0xed, 0x21, 0x59, 0x24, 0x5b, 0xb8,
+	0x49, 0x1d, 0x17, 0xae, 0x83, 0x51, 0xb1, 0x73, 0xab, 0xae, 0x4c, 0x99, 0x1f, 0xdc, 0x8d, 0x22,
+	0x6c, 0x8b, 0x69, 0x9e, 0x91, 0x25, 0x89, 0x81, 0x7c, 0x30, 0xb8, 0x01, 0xc6, 0xc5, 0xe7, 0x65,
+	0xe7, 0xae, 0xad, 0xcc, 0x78, 0x60, 0xe4, 0xc9, 0x76, 0x2b, 0x37, 0x5e, 0xf2, 0x51, 0x50, 0x07,
+	0x50, 0xff, 0x34, 0x09, 0xe6, 0xf6, 0xb8, 0xd9, 0x92, 0x63, 0x5b, 0x94, 0x07, 0x3f, 0xbc, 0x1a,
+	0xc9, 0xff, 0x85, 0x58, 0xfe, 0x9f, 0xeb, 0x27, 0x1f, 0xaa, 0x07, 0xcb, 0x81, 0xbf, 0x12, 0x11,
+	0x2c, 0x65, 0xf0, 0xdd, 0x56, 0xae, 0x47, 0xaf, 0x36, 0x02, 0xa4, 0xa8, 0x5b, 0x60, 0x13, 0xc0,
+	0x2a, 0x66, 0xde, 0x9a, 0x8b, 0x6d, 0x26, 0x35, 0xd1, 0x1a, 0x51, 0x91, 0x70, 0x7e, 0xb0, 0x40,
+	0xe6, 0x12, 0xc5, 0x59, 0x75, 0x0a, 0xb8, 0xdc, 0x85, 0x86, 0x7a, 0x68, 0x80, 0xff, 0x07, 0x23,
+	0x2e, 0xc1, 0xcc, 0xb1, 0x33, 0x29, 0x71, 0x8b, 0x20, 0x6c, 0x90, 0xd8, 0x45, 0x8a, 0x0a, 0x5f,
+	0x00, 0xa3, 0x35, 0xc2, 0x18, 0xae, 0x90, 0xcc, 0xb0, 0x60, 0x0c, 0xea, 0xee, 0x8a, 0xdc, 0x46,
+	0x3e, 0x5d, 0xff, 0x53, 0x03, 0x67, 0xf7, 0xb0, 0xe3, 0x32, 0x65, 0x1e, 0xdc, 0xe8, 0xca, 0x54,
+	0x63, 0xb0, 0x0b, 0x72, 0x69, 0x91, 0xa7, 0x41, 0x8d, 0xf0, 0x77, 0x42, 0x59, 0xba, 0x01, 0x86,
+	0xa9, 0x47, 0x6a, 0x7e, 0x01, 0x7a, 0xed, 0x90, 0x59, 0xd4, 0xa9, 0xef, 0xd7, 0x38, 0x1a, 0x92,
+	0xa0, 0xfa, 0x83, 0xe4, 0x9e, 0x77, 0xe3, 0xa9, 0x0c, 0x3f, 0x02, 0x53, 0x62, 0xa5, 0x7a, 0x2b,
+	0xd9, 0x54, 0x37, 0xec, 0x5b, 0x2d, 0xf6, 0x19, 0x6d, 0x8a, 0xa7, 0xd5, 0x51, 0xa6, 0x4a, 0x11,
+	0x68, 0x14, 0x53, 0x05, 0xe7, 0x41, 0xba, 0x46, 0x6d, 0x44, 0xea, 0x55, 0x6a, 0x62, 0xa6, 0xfa,
+	0xd4, 0x74, 0xbb, 0x95, 0x4b, 0xaf, 0x74, 0xb6, 0x51, 0x98, 0x07, 0xbe, 0x02, 0xd2, 0x35, 0x7c,
+	0x2f, 0x10, 0x91, 0xfd, 0xe4, 0xa4, 0xd2, 0x97, 0x5e, 0xe9, 0x90, 0x50, 0x98, 0x0f, 0xde, 0xe2,
+	0xd1, 0xc0, 0x3b, 0x31, 0xcb, 0xa4, 0x84, 0x99, 0xcf, 0x0f, 0xd6, 0xb8, 0x45, 0xf1, 0x0b, 0x45,
+	0x8e, 0x80, 0x40, 0x3e, 0x16, 0xa4, 0x60, 0xac, 0xac, 0x6a, 0x90, 0x88, 0xb2, 0x74, 0xe1, 0x8d,
+	0xc3, 0xba, 0x4f, 0xc1, 0x14, 0x27, 0x78, 0x98, 0xf8, 0x2b, 0x14, 0xc0, 0xeb, 0xdf, 0xa7, 0xc0,
+	0x7f, 0xf7, 0x2d, 0xa0, 0xf0, 0x6d, 0x00, 0x9d, 0x32, 0x23, 0x6e, 0x93, 0x58, 0x57, 0xe4, 0x2c,
+	0xca, 0x87, 0x42, 0xee, 0xce, 0x64, 0xf1, 0x34, 0xcf, 0xb0, 0x9b, 0x5d, 0x54, 0xd4, 0x43, 0x02,
+	0x9a, 0x60, 0x92, 0xe7, 0x9d, 0xf4, 0x1d, 0x55, 0xf3, 0xe7, 0xc1, 0x92, 0xfa, 0x04, 0x1f, 0x1d,
+	0x96, 0xc3, 0x20, 0x28, 0x8a, 0x09, 0x17, 0xc1, 0xb4, 0x1a, 0x7b, 0x62, 0xbe, 0x3c, 0xa3, 0x8c,
+	0x3d, 0xbd, 0x14, 0x25, 0xa3, 0x38, 0x3f, 0x87, 0xb0, 0x08, 0xa3, 0x2e, 0xb1, 0x02, 0x88, 0x54,
+	0x14, 0xe2, 0x72, 0x94, 0x8c, 0xe2, 0xfc, 0xb0, 0x0a, 0xa6, 0x14, 0xaa, 0x72, 0x6d, 0x66, 0x58,
+	0x44, 0xc7, 0x80, 0x03, 0xaa, 0xea, 0x5c, 0x41, 0xb8, 0x2f, 0x45, 0xb0, 0x50, 0x0c, 0x1b, 0x7a,
+	0x00, 0x98, 0x7e, 0x35, 0x65, 0x99, 0x11, 0xa1, 0xe9, 0xcd, 0x43, 0xc6, 0x4b, 0x50, 0x96, 0x3b,
+	0x33, 0x40, 0xb0, 0xc5, 0x50, 0x48, 0x8f, 0xfe, 0x85, 0x06, 0x66, 0xe2, 0x03, 0x6e, 0xf0, 0xb4,
+	0xd0, 0xf6, 0x7c, 0x5a, 0xdc, 0x06, 0x63, 0x72, 0x54, 0x72, 0x5c, 0x15, 0x00, 0x2f, 0x0f, 0x58,
+	0xf4, 0x70, 0x99, 0x54, 0x4b, 0x4a, 0x54, 0x86, 0xb3, 0xbf, 0x42, 0x01, 0xa4, 0xfe, 0x75, 0x12,
+	0x80, 0x4e, 0x8a, 0xc1, 0x85, 0x48, 0x97, 0x9b, 0x8b, 0x75, 0xb9, 0x99, 0xf0, 0x3b, 0x25, 0xd4,
+	0xd1, 0xd6, 0xc1, 0x88, 0x23, 0x4a, 0x8f, 0x3a, 0x61, 0xa1, 0x9f, 0x31, 0x83, 0x31, 0x29, 0x40,
+	0x2b, 0x02, 0xde, 0x3b, 0x54, 0x01, 0x53, 0x68, 0xf0, 0x06, 0x48, 0xd5, 0x1d, 0xcb, 0x9f, 0x6b,
+	0xfa, 0x8e, 0x84, 0xab, 0x8e, 0xc5, 0x22, 0x98, 0x63, 0xfc, 0xec, 0x7c, 0x17, 0x09, 0x1c, 0x3e,
+	0x66, 0xfa, 0xaf, 0x58, 0x11, 0xa2, 0xe9, 0xc2, 0x42, 0x3f, 0x4c, 0xa4, 0xf8, 0x23, 0xb8, 0xc2,
+	0x98, 0x3e, 0x05, 0x05, 0x98, 0x1c, 0x9f, 0xa8, 0x87, 0x90, 0x2a, 0x43, 0x7d, 0xf1, 0x7b, 0xbd,
+	0x00, 0x25, 0xbe, 0x4f, 0x41, 0x01, 0xa6, 0xfe, 0x4d, 0x12, 0x4c, 0x44, 0x5e, 0x58, 0xff, 0x86,
+	0xbb, 0x64, 0xae, 0x1d, 0xad, 0xbb, 0x24, 0xe6, 0xd1, 0xbb, 0x4b, 0xe2, 0x1e, 0x9f, 0xbb, 0x42,
+	0xf8, 0x3d, 0xdc, 0xf5, 0x73, 0xc2, 0x77, 0x97, 0x6c, 0xb5, 0x83, 0xb9, 0x4b, 0xf2, 0x86, 0xdc,
+	0x75, 0x33, 0xfc, 0x7e, 0xec, 0x33, 0xf3, 0x18, 0xfe, 0xe5, 0x8c, 0x77, 0x1a, 0xd8, 0xf6, 0xa8,
+	0xb7, 0x53, 0x1c, 0xef, 0x7a, 0x6b, 0x5a, 0x60, 0x02, 0x37, 0x89, 0x8b, 0x2b, 0x44, 0x6c, 0x2b,
+	0x7f, 0x1d, 0x14, 0x77, 0x86, 0x3f, 0xf5, 0x16, 0x43, 0x38, 0x28, 0x82, 0xca, 0xdb, 0xa0, 0x5a,
+	0xdf, 0xf2, 0x82, 0x37, 0xa4, 0xea, 0x0c, 0xa2, 0x0d, 0x2e, 0x76, 0x51, 0x51, 0x0f, 0x09, 0xfd,
+	0xf3, 0x04, 0x38, 0xd1, 0xf5, 0x7a, 0xef, 0x18, 0x45, 0x3b, 0x26, 0xa3, 0x24, 0x9e, 0xa2, 0x51,
+	0x92, 0x07, 0x36, 0xca, 0x2f, 0x09, 0x00, 0xbb, 0x8b, 0x28, 0xfc, 0x58, 0xb4, 0x62, 0xd3, 0xa5,
+	0x65, 0x62, 0x49, 0xf2, 0x51, 0x8c, 0x91, 0xe1, 0x3e, 0x1e, 0xc6, 0x46, 0x71, 0x65, 0xc7, 0xf3,
+	0x83, 0x29, 0xf4, 0x1f, 0x29, 0x79, 0xb4, 0xff, 0x91, 0xf4, 0xdf, 0xe2, 0x66, 0x7c, 0xa6, 0x7f,
+	0x5c, 0xf5, 0x72, 0x7f, 0xf2, 0x29, 0xba, 0x5f, 0xff, 0x49, 0x03, 0x33, 0xf1, 0x26, 0xfc, 0xcc,
+	0xfd, 0xce, 0xfc, 0x35, 0x7a, 0x89, 0x67, 0xfb, 0x57, 0xe6, 0x77, 0x1a, 0x38, 0xd5, 0x6b, 0x84,
+	0x81, 0x4b, 0x91, 0xc1, 0x33, 0x1f, 0x1e, 0x3c, 0x77, 0x5b, 0xb9, 0x5c, 0x8f, 0x1f, 0x10, 0x3e,
+	0x4c, 0x68, 0x36, 0x3d, 0x1e, 0x07, 0xfc, 0xd0, 0x7d, 0x66, 0xe9, 0x84, 0x23, 0x39, 0xf3, 0xb1,
+	0xda, 0xbb, 0x78, 0xe1, 0xe1, 0x93, 0xec, 0xd0, 0xa3, 0x27, 0xd9, 0xa1, 0xc7, 0x4f, 0xb2, 0x43,
+	0x9f, 0xb4, 0xb3, 0xda, 0xc3, 0x76, 0x56, 0x7b, 0xd4, 0xce, 0x6a, 0x8f, 0xdb, 0x59, 0xed, 0xaf,
+	0x76, 0x56, 0xfb, 0xec, 0xef, 0xec, 0xd0, 0xfb, 0xa3, 0x0a, 0xfa, 0x9f, 0x00, 0x00, 0x00, 0xff,
+	0xff, 0x79, 0xae, 0x08, 0x04, 0x2d, 0x1a, 0x00, 0x00,
 }
 
 func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) {
@@ -823,6 +924,89 @@ func (m *ExternalMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	return len(dAtA) - i, nil
 }
 
+func (m *HPAScalingPolicy) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *HPAScalingPolicy) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HPAScalingPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i = encodeVarintGenerated(dAtA, i, uint64(m.PeriodSeconds))
+	i--
+	dAtA[i] = 0x18
+	i = encodeVarintGenerated(dAtA, i, uint64(m.Value))
+	i--
+	dAtA[i] = 0x10
+	i -= len(m.Type)
+	copy(dAtA[i:], m.Type)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *HPAScalingRules) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *HPAScalingRules) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HPAScalingRules) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.StabilizationWindowSeconds != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.StabilizationWindowSeconds))
+		i--
+		dAtA[i] = 0x18
+	}
+	if len(m.Policies) > 0 {
+		for iNdEx := len(m.Policies) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Policies[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if m.SelectPolicy != nil {
+		i -= len(*m.SelectPolicy)
+		copy(dAtA[i:], *m.SelectPolicy)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SelectPolicy)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
 func (m *HorizontalPodAutoscaler) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
@@ -876,6 +1060,53 @@ func (m *HorizontalPodAutoscaler) MarshalToSizedBuffer(dAtA []byte) (int, error)
 	return len(dAtA) - i, nil
 }
 
+func (m *HorizontalPodAutoscalerBehavior) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *HorizontalPodAutoscalerBehavior) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HorizontalPodAutoscalerBehavior) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.ScaleDown != nil {
+		{
+			size, err := m.ScaleDown.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.ScaleUp != nil {
+		{
+			size, err := m.ScaleUp.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
 func (m *HorizontalPodAutoscalerCondition) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
@@ -996,6 +1227,18 @@ func (m *HorizontalPodAutoscalerSpec) MarshalToSizedBuffer(dAtA []byte) (int, er
 	_ = i
 	var l int
 	_ = l
+	if m.Behavior != nil {
+		{
+			size, err := m.Behavior.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x2a
+	}
 	if len(m.Metrics) > 0 {
 		for iNdEx := len(m.Metrics) - 1; iNdEx >= 0; iNdEx-- {
 			{
@@ -1726,6 +1969,41 @@ func (m *ExternalMetricStatus) Size() (n int) {
 	return n
 }
 
+func (m *HPAScalingPolicy) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Type)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 1 + sovGenerated(uint64(m.Value))
+	n += 1 + sovGenerated(uint64(m.PeriodSeconds))
+	return n
+}
+
+func (m *HPAScalingRules) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.SelectPolicy != nil {
+		l = len(*m.SelectPolicy)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if len(m.Policies) > 0 {
+		for _, e := range m.Policies {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.StabilizationWindowSeconds != nil {
+		n += 1 + sovGenerated(uint64(*m.StabilizationWindowSeconds))
+	}
+	return n
+}
+
 func (m *HorizontalPodAutoscaler) Size() (n int) {
 	if m == nil {
 		return 0
@@ -1741,6 +2019,23 @@ func (m *HorizontalPodAutoscaler) Size() (n int) {
 	return n
 }
 
+func (m *HorizontalPodAutoscalerBehavior) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ScaleUp != nil {
+		l = m.ScaleUp.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.ScaleDown != nil {
+		l = m.ScaleDown.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
 func (m *HorizontalPodAutoscalerCondition) Size() (n int) {
 	if m == nil {
 		return 0
@@ -1795,6 +2090,10 @@ func (m *HorizontalPodAutoscalerSpec) Size() (n int) {
 			n += 1 + l + sovGenerated(uint64(l))
 		}
 	}
+	if m.Behavior != nil {
+		l = m.Behavior.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
 	return n
 }
 
@@ -2061,6 +2360,35 @@ func (this *ExternalMetricStatus) String() string {
 	}, "")
 	return s
 }
+func (this *HPAScalingPolicy) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&HPAScalingPolicy{`,
+		`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+		`Value:` + fmt.Sprintf("%v", this.Value) + `,`,
+		`PeriodSeconds:` + fmt.Sprintf("%v", this.PeriodSeconds) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *HPAScalingRules) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForPolicies := "[]HPAScalingPolicy{"
+	for _, f := range this.Policies {
+		repeatedStringForPolicies += strings.Replace(strings.Replace(f.String(), "HPAScalingPolicy", "HPAScalingPolicy", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForPolicies += "}"
+	s := strings.Join([]string{`&HPAScalingRules{`,
+		`SelectPolicy:` + valueToStringGenerated(this.SelectPolicy) + `,`,
+		`Policies:` + repeatedStringForPolicies + `,`,
+		`StabilizationWindowSeconds:` + valueToStringGenerated(this.StabilizationWindowSeconds) + `,`,
+		`}`,
+	}, "")
+	return s
+}
 func (this *HorizontalPodAutoscaler) String() string {
 	if this == nil {
 		return "nil"
@@ -2073,6 +2401,17 @@ func (this *HorizontalPodAutoscaler) String() string {
 	}, "")
 	return s
 }
+func (this *HorizontalPodAutoscalerBehavior) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&HorizontalPodAutoscalerBehavior{`,
+		`ScaleUp:` + strings.Replace(this.ScaleUp.String(), "HPAScalingRules", "HPAScalingRules", 1) + `,`,
+		`ScaleDown:` + strings.Replace(this.ScaleDown.String(), "HPAScalingRules", "HPAScalingRules", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
 func (this *HorizontalPodAutoscalerCondition) String() string {
 	if this == nil {
 		return "nil"
@@ -2117,6 +2456,7 @@ func (this *HorizontalPodAutoscalerSpec) String() string {
 		`MinReplicas:` + valueToStringGenerated(this.MinReplicas) + `,`,
 		`MaxReplicas:` + fmt.Sprintf("%v", this.MaxReplicas) + `,`,
 		`Metrics:` + repeatedStringForMetrics + `,`,
+		`Behavior:` + strings.Replace(this.Behavior.String(), "HorizontalPodAutoscalerBehavior", "HorizontalPodAutoscalerBehavior", 1) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -2673,6 +3013,269 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error {
 	}
 	return nil
 }
+func (m *HPAScalingPolicy) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: HPAScalingPolicy: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: HPAScalingPolicy: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Type = HPAScalingPolicyType(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+			}
+			m.Value = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Value |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PeriodSeconds", wireType)
+			}
+			m.PeriodSeconds = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.PeriodSeconds |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *HPAScalingRules) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: HPAScalingRules: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: HPAScalingRules: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SelectPolicy", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := ScalingPolicySelect(dAtA[iNdEx:postIndex])
+			m.SelectPolicy = &s
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Policies", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Policies = append(m.Policies, HPAScalingPolicy{})
+			if err := m.Policies[len(m.Policies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StabilizationWindowSeconds", wireType)
+			}
+			var v int32
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.StabilizationWindowSeconds = &v
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
 func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error {
 	l := len(dAtA)
 	iNdEx := 0
@@ -2825,6 +3428,131 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error {
 	}
 	return nil
 }
+func (m *HorizontalPodAutoscalerBehavior) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: HorizontalPodAutoscalerBehavior: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: HorizontalPodAutoscalerBehavior: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ScaleUp", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ScaleUp == nil {
+				m.ScaleUp = &HPAScalingRules{}
+			}
+			if err := m.ScaleUp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ScaleDown", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ScaleDown == nil {
+				m.ScaleDown = &HPAScalingRules{}
+			}
+			if err := m.ScaleDown.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
 func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error {
 	l := len(dAtA)
 	iNdEx := 0
@@ -3294,6 +4022,42 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error {
 				return err
 			}
 			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Behavior", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Behavior == nil {
+				m.Behavior = &HorizontalPodAutoscalerBehavior{}
+			}
+			if err := m.Behavior.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -5215,6 +5979,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error {
 func skipGenerated(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
+	depth := 0
 	for iNdEx < l {
 		var wire uint64
 		for shift := uint(0); ; shift += 7 {
@@ -5246,10 +6011,8 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			return iNdEx, nil
 		case 1:
 			iNdEx += 8
-			return iNdEx, nil
 		case 2:
 			var length int
 			for shift := uint(0); ; shift += 7 {
@@ -5270,55 +6033,30 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 				return 0, ErrInvalidLengthGenerated
 			}
 			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthGenerated
-			}
-			return iNdEx, nil
 		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowGenerated
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipGenerated(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthGenerated
-				}
-			}
-			return iNdEx, nil
+			depth++
 		case 4:
-			return iNdEx, nil
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
 		case 5:
 			iNdEx += 4
-			return iNdEx, nil
 		default:
 			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
 		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
 	}
-	panic("unreachable")
+	return 0, io.ErrUnexpectedEOF
 }
 
 var (
-	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
 )
diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto
index 80f1d345..24dc5882 100644
--- a/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto
+++ b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto
@@ -64,6 +64,47 @@ message ExternalMetricStatus {
   optional MetricValueStatus current = 2;
 }
 
+// HPAScalingPolicy is a single policy which must hold true for a specified past interval.
+message HPAScalingPolicy {
+  // Type is used to specify the scaling policy.
+  optional string type = 1;
+
+  // Value contains the amount of change which is permitted by the policy.
+  // It must be greater than zero
+  optional int32 value = 2;
+
+  // PeriodSeconds specifies the window of time for which the policy should hold true.
+  // PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min).
+  optional int32 periodSeconds = 3;
+}
+
+// HPAScalingRules configures the scaling behavior for one direction.
+// These Rules are applied after calculating DesiredReplicas from metrics for the HPA.
+// They can limit the scaling velocity by specifying scaling policies.
+// They can prevent flapping by specifying the stabilization window, so that the
+// number of replicas is not set instantly, instead, the safest value from the stabilization
+// window is chosen.
+message HPAScalingRules {
+  // StabilizationWindowSeconds is the number of seconds for which past recommendations should be
+  // considered while scaling up or scaling down.
+  // StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour).
+  // If not set, use the default values:
+  // - For scale up: 0 (i.e. no stabilization is done).
+  // - For scale down: 300 (i.e. the stabilization window is 300 seconds long).
+  // +optional
+  optional int32 stabilizationWindowSeconds = 3;
+
+  // selectPolicy is used to specify which policy should be used.
+  // If not set, the default value MaxPolicySelect is used.
+  // +optional
+  optional string selectPolicy = 1;
+
+  // policies is a list of potential scaling polices which can be used during scaling.
+  // At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid
+  // +optional
+  repeated HPAScalingPolicy policies = 2;
+}
+
 // HorizontalPodAutoscaler is the configuration for a horizontal pod
 // autoscaler, which automatically manages the replica count of any resource
 // implementing the scale subresource based on the metrics specified.
@@ -83,6 +124,25 @@ message HorizontalPodAutoscaler {
   optional HorizontalPodAutoscalerStatus status = 3;
 }
 
+// HorizontalPodAutoscalerBehavior configures the scaling behavior of the target
+// in both Up and Down directions (scaleUp and scaleDown fields respectively).
+message HorizontalPodAutoscalerBehavior {
+  // scaleUp is scaling policy for scaling Up.
+  // If not set, the default value is the higher of:
+  //   * increase no more than 4 pods per 60 seconds
+  //   * double the number of pods per 60 seconds
+  // No stabilization is used.
+  // +optional
+  optional HPAScalingRules scaleUp = 1;
+
+  // scaleDown is scaling policy for scaling Down.
+  // If not set, the default value is to allow to scale down to minReplicas pods, with a
+  // 300 second stabilization window (i.e., the highest recommendation for
+  // the last 300sec is used).
+  // +optional
+  optional HPAScalingRules scaleDown = 2;
+}
+
 // HorizontalPodAutoscalerCondition describes the state of
 // a HorizontalPodAutoscaler at a certain point.
 message HorizontalPodAutoscalerCondition {
@@ -145,6 +205,12 @@ message HorizontalPodAutoscalerSpec {
   // If not set, the default metric will be set to 80% average CPU utilization.
   // +optional
   repeated MetricSpec metrics = 4;
+
+  // behavior configures the scaling behavior of the target
+  // in both Up and Down directions (scaleUp and scaleDown fields respectively).
+  // If not set, the default HPAScalingRules for scale up and scale down are used.
+  // +optional
+  optional HorizontalPodAutoscalerBehavior behavior = 5;
 }
 
 // HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler.
diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/types.go b/vendor/k8s.io/api/autoscaling/v2beta2/types.go
index 4480c7da..614caeb6 100644
--- a/vendor/k8s.io/api/autoscaling/v2beta2/types.go
+++ b/vendor/k8s.io/api/autoscaling/v2beta2/types.go
@@ -72,6 +72,12 @@ type HorizontalPodAutoscalerSpec struct {
 	// If not set, the default metric will be set to 80% average CPU utilization.
 	// +optional
 	Metrics []MetricSpec `json:"metrics,omitempty" protobuf:"bytes,4,rep,name=metrics"`
+
+	// behavior configures the scaling behavior of the target
+	// in both Up and Down directions (scaleUp and scaleDown fields respectively).
+	// If not set, the default HPAScalingRules for scale up and scale down are used.
+	// +optional
+	Behavior *HorizontalPodAutoscalerBehavior `json:"behavior,omitempty" protobuf:"bytes,5,opt,name=behavior"`
 }
 
 // CrossVersionObjectReference contains enough information to let you identify the referred resource.
@@ -117,6 +123,84 @@ type MetricSpec struct {
 	External *ExternalMetricSource `json:"external,omitempty" protobuf:"bytes,5,opt,name=external"`
 }
 
+// HorizontalPodAutoscalerBehavior configures the scaling behavior of the target
+// in both Up and Down directions (scaleUp and scaleDown fields respectively).
+type HorizontalPodAutoscalerBehavior struct {
+	// scaleUp is scaling policy for scaling Up.
+	// If not set, the default value is the higher of:
+	//   * increase no more than 4 pods per 60 seconds
+	//   * double the number of pods per 60 seconds
+	// No stabilization is used.
+	// +optional
+	ScaleUp *HPAScalingRules `json:"scaleUp,omitempty" protobuf:"bytes,1,opt,name=scaleUp"`
+	// scaleDown is scaling policy for scaling Down.
+	// If not set, the default value is to allow to scale down to minReplicas pods, with a
+	// 300 second stabilization window (i.e., the highest recommendation for
+	// the last 300sec is used).
+	// +optional
+	ScaleDown *HPAScalingRules `json:"scaleDown,omitempty" protobuf:"bytes,2,opt,name=scaleDown"`
+}
+
+// ScalingPolicySelect is used to specify which policy should be used while scaling in a certain direction
+type ScalingPolicySelect string
+
+const (
+	// MaxPolicySelect selects the policy with the highest possible change.
+	MaxPolicySelect ScalingPolicySelect = "Max"
+	// MinPolicySelect selects the policy with the lowest possible change.
+	MinPolicySelect ScalingPolicySelect = "Min"
+	// DisabledPolicySelect disables the scaling in this direction.
+	DisabledPolicySelect ScalingPolicySelect = "Disabled"
+)
+
+// HPAScalingRules configures the scaling behavior for one direction.
+// These Rules are applied after calculating DesiredReplicas from metrics for the HPA.
+// They can limit the scaling velocity by specifying scaling policies.
+// They can prevent flapping by specifying the stabilization window, so that the
+// number of replicas is not set instantly, instead, the safest value from the stabilization
+// window is chosen.
+type HPAScalingRules struct {
+	// StabilizationWindowSeconds is the number of seconds for which past recommendations should be
+	// considered while scaling up or scaling down.
+	// StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour).
+	// If not set, use the default values:
+	// - For scale up: 0 (i.e. no stabilization is done).
+	// - For scale down: 300 (i.e. the stabilization window is 300 seconds long).
+	// +optional
+	StabilizationWindowSeconds *int32 `json:"stabilizationWindowSeconds" protobuf:"varint,3,opt,name=stabilizationWindowSeconds"`
+	// selectPolicy is used to specify which policy should be used.
+	// If not set, the default value MaxPolicySelect is used.
+	// +optional
+	SelectPolicy *ScalingPolicySelect `json:"selectPolicy,omitempty" protobuf:"bytes,1,opt,name=selectPolicy"`
+	// policies is a list of potential scaling polices which can be used during scaling.
+	// At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid
+	// +optional
+	Policies []HPAScalingPolicy `json:"policies,omitempty" protobuf:"bytes,2,rep,name=policies"`
+}
+
+// HPAScalingPolicyType is the type of the policy which could be used while making scaling decisions.
+type HPAScalingPolicyType string
+
+const (
+	// PodsScalingPolicy is a policy used to specify a change in absolute number of pods.
+	PodsScalingPolicy HPAScalingPolicyType = "Pods"
+	// PercentScalingPolicy is a policy used to specify a relative amount of change with respect to
+	// the current number of pods.
+	PercentScalingPolicy HPAScalingPolicyType = "Percent"
+)
+
+// HPAScalingPolicy is a single policy which must hold true for a specified past interval.
+type HPAScalingPolicy struct {
+	// Type is used to specify the scaling policy.
+	Type HPAScalingPolicyType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=HPAScalingPolicyType"`
+	// Value contains the amount of change which is permitted by the policy.
+	// It must be greater than zero
+	Value int32 `json:"value" protobuf:"varint,2,opt,name=value"`
+	// PeriodSeconds specifies the window of time for which the policy should hold true.
+	// PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min).
+	PeriodSeconds int32 `json:"periodSeconds" protobuf:"varint,3,opt,name=periodSeconds"`
+}
+
 // MetricSourceType indicates the type of metric.
 type MetricSourceType string
 
diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go
index bb85b9f0..3f38880f 100644
--- a/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go
@@ -58,6 +58,28 @@ func (ExternalMetricStatus) SwaggerDoc() map[string]string {
 	return map_ExternalMetricStatus
 }
 
+var map_HPAScalingPolicy = map[string]string{
+	"":              "HPAScalingPolicy is a single policy which must hold true for a specified past interval.",
+	"type":          "Type is used to specify the scaling policy.",
+	"value":         "Value contains the amount of change which is permitted by the policy. It must be greater than zero",
+	"periodSeconds": "PeriodSeconds specifies the window of time for which the policy should hold true. PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min).",
+}
+
+func (HPAScalingPolicy) SwaggerDoc() map[string]string {
+	return map_HPAScalingPolicy
+}
+
+var map_HPAScalingRules = map[string]string{
+	"":                           "HPAScalingRules configures the scaling behavior for one direction. These Rules are applied after calculating DesiredReplicas from metrics for the HPA. They can limit the scaling velocity by specifying scaling policies. They can prevent flapping by specifying the stabilization window, so that the number of replicas is not set instantly, instead, the safest value from the stabilization window is chosen.",
+	"stabilizationWindowSeconds": "StabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long).",
+	"selectPolicy":               "selectPolicy is used to specify which policy should be used. If not set, the default value MaxPolicySelect is used.",
+	"policies":                   "policies is a list of potential scaling polices which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid",
+}
+
+func (HPAScalingRules) SwaggerDoc() map[string]string {
+	return map_HPAScalingRules
+}
+
 var map_HorizontalPodAutoscaler = map[string]string{
 	"":         "HorizontalPodAutoscaler is the configuration for a horizontal pod autoscaler, which automatically manages the replica count of any resource implementing the scale subresource based on the metrics specified.",
 	"metadata": "metadata is the standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
@@ -69,6 +91,16 @@ func (HorizontalPodAutoscaler) SwaggerDoc() map[string]string {
 	return map_HorizontalPodAutoscaler
 }
 
+var map_HorizontalPodAutoscalerBehavior = map[string]string{
+	"":          "HorizontalPodAutoscalerBehavior configures the scaling behavior of the target in both Up and Down directions (scaleUp and scaleDown fields respectively).",
+	"scaleUp":   "scaleUp is scaling policy for scaling Up. If not set, the default value is the higher of:\n  * increase no more than 4 pods per 60 seconds\n  * double the number of pods per 60 seconds\nNo stabilization is used.",
+	"scaleDown": "scaleDown is scaling policy for scaling Down. If not set, the default value is to allow to scale down to minReplicas pods, with a 300 second stabilization window (i.e., the highest recommendation for the last 300sec is used).",
+}
+
+func (HorizontalPodAutoscalerBehavior) SwaggerDoc() map[string]string {
+	return map_HorizontalPodAutoscalerBehavior
+}
+
 var map_HorizontalPodAutoscalerCondition = map[string]string{
 	"":                   "HorizontalPodAutoscalerCondition describes the state of a HorizontalPodAutoscaler at a certain point.",
 	"type":               "type describes the current condition",
@@ -98,6 +130,7 @@ var map_HorizontalPodAutoscalerSpec = map[string]string{
 	"minReplicas":    "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down.  It defaults to 1 pod.  minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured.  Scaling is active as long as at least one metric value is available.",
 	"maxReplicas":    "maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. It cannot be less that minReplicas.",
 	"metrics":        "metrics contains the specifications for which to use to calculate the desired replica count (the maximum replica count across all metrics will be used).  The desired replica count is calculated multiplying the ratio between the target value and the current value by the current number of pods.  Ergo, metrics used must decrease as the pod count is increased, and vice-versa.  See the individual metric source types for more information about how each type of metric must respond. If not set, the default metric will be set to 80% average CPU utilization.",
+	"behavior":       "behavior configures the scaling behavior of the target in both Up and Down directions (scaleUp and scaleDown fields respectively). If not set, the default HPAScalingRules for scale up and scale down are used.",
 }
 
 func (HorizontalPodAutoscalerSpec) SwaggerDoc() map[string]string {
diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go
index 2dffa333..ca26fe92 100644
--- a/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go
@@ -77,6 +77,53 @@ func (in *ExternalMetricStatus) DeepCopy() *ExternalMetricStatus {
 	return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HPAScalingPolicy) DeepCopyInto(out *HPAScalingPolicy) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HPAScalingPolicy.
+func (in *HPAScalingPolicy) DeepCopy() *HPAScalingPolicy {
+	if in == nil {
+		return nil
+	}
+	out := new(HPAScalingPolicy)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HPAScalingRules) DeepCopyInto(out *HPAScalingRules) {
+	*out = *in
+	if in.StabilizationWindowSeconds != nil {
+		in, out := &in.StabilizationWindowSeconds, &out.StabilizationWindowSeconds
+		*out = new(int32)
+		**out = **in
+	}
+	if in.SelectPolicy != nil {
+		in, out := &in.SelectPolicy, &out.SelectPolicy
+		*out = new(ScalingPolicySelect)
+		**out = **in
+	}
+	if in.Policies != nil {
+		in, out := &in.Policies, &out.Policies
+		*out = make([]HPAScalingPolicy, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HPAScalingRules.
+func (in *HPAScalingRules) DeepCopy() *HPAScalingRules {
+	if in == nil {
+		return nil
+	}
+	out := new(HPAScalingRules)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *HorizontalPodAutoscaler) DeepCopyInto(out *HorizontalPodAutoscaler) {
 	*out = *in
@@ -105,6 +152,32 @@ func (in *HorizontalPodAutoscaler) DeepCopyObject() runtime.Object {
 	return nil
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HorizontalPodAutoscalerBehavior) DeepCopyInto(out *HorizontalPodAutoscalerBehavior) {
+	*out = *in
+	if in.ScaleUp != nil {
+		in, out := &in.ScaleUp, &out.ScaleUp
+		*out = new(HPAScalingRules)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ScaleDown != nil {
+		in, out := &in.ScaleDown, &out.ScaleDown
+		*out = new(HPAScalingRules)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerBehavior.
+func (in *HorizontalPodAutoscalerBehavior) DeepCopy() *HorizontalPodAutoscalerBehavior {
+	if in == nil {
+		return nil
+	}
+	out := new(HorizontalPodAutoscalerBehavior)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *HorizontalPodAutoscalerCondition) DeepCopyInto(out *HorizontalPodAutoscalerCondition) {
 	*out = *in
@@ -171,6 +244,11 @@ func (in *HorizontalPodAutoscalerSpec) DeepCopyInto(out *HorizontalPodAutoscaler
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
+	if in.Behavior != nil {
+		in, out := &in.Behavior, &out.Behavior
+		*out = new(HorizontalPodAutoscalerBehavior)
+		(*in).DeepCopyInto(*out)
+	}
 	return
 }
 
diff --git a/vendor/k8s.io/api/batch/v1/generated.pb.go b/vendor/k8s.io/api/batch/v1/generated.pb.go
index fb9d21e1..35944e72 100644
--- a/vendor/k8s.io/api/batch/v1/generated.pb.go
+++ b/vendor/k8s.io/api/batch/v1/generated.pb.go
@@ -43,7 +43,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 func (m *Job) Reset()      { *m = Job{} }
 func (*Job) ProtoMessage() {}
@@ -1771,6 +1771,7 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error {
 func skipGenerated(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
+	depth := 0
 	for iNdEx < l {
 		var wire uint64
 		for shift := uint(0); ; shift += 7 {
@@ -1802,10 +1803,8 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			return iNdEx, nil
 		case 1:
 			iNdEx += 8
-			return iNdEx, nil
 		case 2:
 			var length int
 			for shift := uint(0); ; shift += 7 {
@@ -1826,55 +1825,30 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 				return 0, ErrInvalidLengthGenerated
 			}
 			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthGenerated
-			}
-			return iNdEx, nil
 		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowGenerated
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipGenerated(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthGenerated
-				}
-			}
-			return iNdEx, nil
+			depth++
 		case 4:
-			return iNdEx, nil
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
 		case 5:
 			iNdEx += 4
-			return iNdEx, nil
 		default:
 			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
 		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
 	}
-	panic("unreachable")
+	return 0, io.ErrUnexpectedEOF
 }
 
 var (
-	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
 )
diff --git a/vendor/k8s.io/api/batch/v1beta1/generated.pb.go b/vendor/k8s.io/api/batch/v1beta1/generated.pb.go
index 837a2f9c..69c4054b 100644
--- a/vendor/k8s.io/api/batch/v1beta1/generated.pb.go
+++ b/vendor/k8s.io/api/batch/v1beta1/generated.pb.go
@@ -43,7 +43,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 func (m *CronJob) Reset()      { *m = CronJob{} }
 func (*CronJob) ProtoMessage() {}
@@ -1660,6 +1660,7 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error {
 func skipGenerated(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
+	depth := 0
 	for iNdEx < l {
 		var wire uint64
 		for shift := uint(0); ; shift += 7 {
@@ -1691,10 +1692,8 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			return iNdEx, nil
 		case 1:
 			iNdEx += 8
-			return iNdEx, nil
 		case 2:
 			var length int
 			for shift := uint(0); ; shift += 7 {
@@ -1715,55 +1714,30 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 				return 0, ErrInvalidLengthGenerated
 			}
 			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthGenerated
-			}
-			return iNdEx, nil
 		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowGenerated
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipGenerated(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthGenerated
-				}
-			}
-			return iNdEx, nil
+			depth++
 		case 4:
-			return iNdEx, nil
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
 		case 5:
 			iNdEx += 4
-			return iNdEx, nil
 		default:
 			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
 		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
 	}
-	panic("unreachable")
+	return 0, io.ErrUnexpectedEOF
 }
 
 var (
-	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
 )
diff --git a/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go b/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go
index 8271c841..3e58dbb9 100644
--- a/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go
+++ b/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go
@@ -43,7 +43,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 func (m *CronJob) Reset()      { *m = CronJob{} }
 func (*CronJob) ProtoMessage() {}
@@ -1660,6 +1660,7 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error {
 func skipGenerated(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
+	depth := 0
 	for iNdEx < l {
 		var wire uint64
 		for shift := uint(0); ; shift += 7 {
@@ -1691,10 +1692,8 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			return iNdEx, nil
 		case 1:
 			iNdEx += 8
-			return iNdEx, nil
 		case 2:
 			var length int
 			for shift := uint(0); ; shift += 7 {
@@ -1715,55 +1714,30 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 				return 0, ErrInvalidLengthGenerated
 			}
 			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthGenerated
-			}
-			return iNdEx, nil
 		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowGenerated
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipGenerated(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthGenerated
-				}
-			}
-			return iNdEx, nil
+			depth++
 		case 4:
-			return iNdEx, nil
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
 		case 5:
 			iNdEx += 4
-			return iNdEx, nil
 		default:
 			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
 		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
 	}
-	panic("unreachable")
+	return 0, io.ErrUnexpectedEOF
 }
 
 var (
-	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
 )
diff --git a/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go b/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go
index 2e61b568..24fa4bf8 100644
--- a/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go
+++ b/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go
@@ -42,7 +42,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 func (m *CertificateSigningRequest) Reset()      { *m = CertificateSigningRequest{} }
 func (*CertificateSigningRequest) ProtoMessage() {}
@@ -227,58 +227,59 @@ func init() {
 }
 
 var fileDescriptor_09d156762b8218ef = []byte{
-	// 805 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0x4b, 0x8f, 0x1b, 0x45,
-	0x10, 0xf6, 0xf8, 0xb5, 0x76, 0x7b, 0xd9, 0x44, 0x2d, 0x14, 0x0d, 0x2b, 0x65, 0x66, 0x35, 0x02,
-	0xb4, 0x3c, 0xd2, 0xc3, 0x46, 0x08, 0x56, 0x7b, 0x40, 0x30, 0x4b, 0x04, 0x2b, 0x12, 0x21, 0x75,
-	0x62, 0x0e, 0x08, 0x89, 0xb4, 0xc7, 0x95, 0x71, 0xc7, 0x99, 0x07, 0xd3, 0x3d, 0x06, 0xdf, 0xf2,
-	0x13, 0x38, 0x72, 0x41, 0xe2, 0x97, 0x70, 0x5e, 0x0e, 0x48, 0x39, 0xe6, 0x80, 0x2c, 0xd6, 0xfc,
-	0x8b, 0x9c, 0x50, 0xf7, 0xb4, 0x3d, 0xc6, 0x2b, 0xe3, 0x28, 0x7b, 0x9b, 0xfa, 0xaa, 0xbe, 0xaf,
-	0x1e, 0x5d, 0x35, 0xe8, 0xcb, 0xf1, 0xb1, 0x20, 0x3c, 0xf5, 0xc7, 0xc5, 0x00, 0xf2, 0x04, 0x24,
-	0x08, 0x7f, 0x02, 0xc9, 0x30, 0xcd, 0x7d, 0xe3, 0x60, 0x19, 0xf7, 0x43, 0xc8, 0x25, 0x7f, 0xc4,
-	0x43, 0xa6, 0xdd, 0x47, 0x03, 0x90, 0xec, 0xc8, 0x8f, 0x20, 0x81, 0x9c, 0x49, 0x18, 0x92, 0x2c,
-	0x4f, 0x65, 0x8a, 0xdd, 0x92, 0x40, 0x58, 0xc6, 0xc9, 0x2a, 0x81, 0x18, 0xc2, 0xfe, 0xad, 0x88,
-	0xcb, 0x51, 0x31, 0x20, 0x61, 0x1a, 0xfb, 0x51, 0x1a, 0xa5, 0xbe, 0xe6, 0x0d, 0x8a, 0x47, 0xda,
-	0xd2, 0x86, 0xfe, 0x2a, 0xf5, 0xf6, 0x3f, 0xac, 0x0a, 0x88, 0x59, 0x38, 0xe2, 0x09, 0xe4, 0x53,
-	0x3f, 0x1b, 0x47, 0x0a, 0x10, 0x7e, 0x0c, 0x92, 0xf9, 0x93, 0x4b, 0x55, 0xec, 0xfb, 0x9b, 0x58,
-	0x79, 0x91, 0x48, 0x1e, 0xc3, 0x25, 0xc2, 0x47, 0xdb, 0x08, 0x22, 0x1c, 0x41, 0xcc, 0xd6, 0x79,
-	0xde, 0x1f, 0x75, 0xf4, 0xc6, 0x69, 0xd5, 0xe6, 0x7d, 0x1e, 0x25, 0x3c, 0x89, 0x28, 0xfc, 0x50,
-	0x80, 0x90, 0xf8, 0x21, 0xea, 0xa8, 0x0a, 0x87, 0x4c, 0x32, 0xdb, 0x3a, 0xb0, 0x0e, 0x7b, 0xb7,
-	0x3f, 0x20, 0xd5, 0x7c, 0x96, 0x89, 0x48, 0x36, 0x8e, 0x14, 0x20, 0x88, 0x8a, 0x26, 0x93, 0x23,
-	0xf2, 0xf5, 0xe0, 0x31, 0x84, 0xf2, 0x1e, 0x48, 0x16, 0xe0, 0xf3, 0x99, 0x5b, 0x9b, 0xcf, 0x5c,
-	0x54, 0x61, 0x74, 0xa9, 0x8a, 0x1f, 0xa2, 0xa6, 0xc8, 0x20, 0xb4, 0xeb, 0x5a, 0xfd, 0x13, 0xb2,
-	0x65, 0xfa, 0x64, 0x63, 0xad, 0xf7, 0x33, 0x08, 0x83, 0x5d, 0x93, 0xab, 0xa9, 0x2c, 0xaa, 0x95,
-	0xf1, 0x08, 0xb5, 0x85, 0x64, 0xb2, 0x10, 0x76, 0x43, 0xe7, 0xf8, 0xf4, 0x0a, 0x39, 0xb4, 0x4e,
-	0xb0, 0x67, 0xb2, 0xb4, 0x4b, 0x9b, 0x1a, 0x7d, 0xef, 0xd7, 0x3a, 0xf2, 0x36, 0x72, 0x4f, 0xd3,
-	0x64, 0xc8, 0x25, 0x4f, 0x13, 0x7c, 0x8c, 0x9a, 0x72, 0x9a, 0x81, 0x1e, 0x68, 0x37, 0x78, 0x73,
-	0x51, 0xf2, 0x83, 0x69, 0x06, 0x2f, 0x66, 0xee, 0xeb, 0xeb, 0xf1, 0x0a, 0xa7, 0x9a, 0x81, 0xdf,
-	0x46, 0xed, 0x1c, 0x98, 0x48, 0x13, 0x3d, 0xae, 0x6e, 0x55, 0x08, 0xd5, 0x28, 0x35, 0x5e, 0xfc,
-	0x0e, 0xda, 0x89, 0x41, 0x08, 0x16, 0x81, 0xee, 0xb9, 0x1b, 0x5c, 0x33, 0x81, 0x3b, 0xf7, 0x4a,
-	0x98, 0x2e, 0xfc, 0xf8, 0x31, 0xda, 0x7b, 0xc2, 0x84, 0xec, 0x67, 0x43, 0x26, 0xe1, 0x01, 0x8f,
-	0xc1, 0x6e, 0xea, 0x29, 0xbd, 0xfb, 0x72, 0xef, 0xac, 0x18, 0xc1, 0x0d, 0xa3, 0xbe, 0x77, 0xf7,
-	0x3f, 0x4a, 0x74, 0x4d, 0xd9, 0x9b, 0x59, 0xe8, 0xe6, 0xc6, 0xf9, 0xdc, 0xe5, 0x42, 0xe2, 0xef,
-	0x2e, 0xed, 0x1b, 0x79, 0xb9, 0x3a, 0x14, 0x5b, 0x6f, 0xdb, 0x75, 0x53, 0x4b, 0x67, 0x81, 0xac,
-	0xec, 0xda, 0xf7, 0xa8, 0xc5, 0x25, 0xc4, 0xc2, 0xae, 0x1f, 0x34, 0x0e, 0x7b, 0xb7, 0x4f, 0x5e,
-	0x7d, 0x11, 0x82, 0xd7, 0x4c, 0x9a, 0xd6, 0x99, 0x12, 0xa4, 0xa5, 0xae, 0xf7, 0x7b, 0xe3, 0x7f,
-	0x1a, 0x54, 0x2b, 0x89, 0xdf, 0x42, 0x3b, 0x79, 0x69, 0xea, 0xfe, 0x76, 0x83, 0x9e, 0x7a, 0x15,
-	0x13, 0x41, 0x17, 0x3e, 0x4c, 0x50, 0xbb, 0x50, 0xcf, 0x23, 0xec, 0xd6, 0x41, 0xe3, 0xb0, 0x1b,
-	0xdc, 0x50, 0x8f, 0xdc, 0xd7, 0xc8, 0x8b, 0x99, 0xdb, 0xf9, 0x0a, 0xa6, 0xda, 0xa0, 0x26, 0x0a,
-	0xbf, 0x8f, 0x3a, 0x85, 0x80, 0x3c, 0x61, 0x31, 0x98, 0xd5, 0x58, 0xce, 0xa1, 0x6f, 0x70, 0xba,
-	0x8c, 0xc0, 0x37, 0x51, 0xa3, 0xe0, 0x43, 0xb3, 0x1a, 0x3d, 0x13, 0xd8, 0xe8, 0x9f, 0x7d, 0x4e,
-	0x15, 0x8e, 0x3d, 0xd4, 0x8e, 0xf2, 0xb4, 0xc8, 0x84, 0xdd, 0xd4, 0xc9, 0x91, 0x4a, 0xfe, 0x85,
-	0x46, 0xa8, 0xf1, 0xe0, 0x04, 0xb5, 0xe0, 0x27, 0x99, 0x33, 0xbb, 0xad, 0x47, 0x79, 0x76, 0xb5,
-	0xbb, 0x25, 0x77, 0x94, 0xd6, 0x9d, 0x44, 0xe6, 0xd3, 0x6a, 0xb2, 0x1a, 0xa3, 0x65, 0x9a, 0x7d,
-	0x40, 0xa8, 0x8a, 0xc1, 0xd7, 0x51, 0x63, 0x0c, 0xd3, 0xf2, 0x80, 0xa8, 0xfa, 0xc4, 0x9f, 0xa1,
-	0xd6, 0x84, 0x3d, 0x29, 0xc0, 0xfc, 0x47, 0xde, 0xdb, 0x5a, 0x8f, 0x56, 0xfb, 0x46, 0x51, 0x68,
-	0xc9, 0x3c, 0xa9, 0x1f, 0x5b, 0xde, 0x9f, 0x16, 0x72, 0xb7, 0x5c, 0x3f, 0xfe, 0x11, 0xa1, 0x70,
-	0x71, 0x9b, 0xc2, 0xb6, 0x74, 0xff, 0xa7, 0xaf, 0xde, 0xff, 0xf2, 0xce, 0xab, 0x1f, 0xe5, 0x12,
-	0x12, 0x74, 0x25, 0x15, 0x3e, 0x42, 0xbd, 0x15, 0x69, 0xdd, 0xe9, 0x6e, 0x70, 0x6d, 0x3e, 0x73,
-	0x7b, 0x2b, 0xe2, 0x74, 0x35, 0xc6, 0xfb, 0xd8, 0x8c, 0x4d, 0x37, 0x8a, 0xdd, 0xc5, 0xfe, 0x5b,
-	0xfa, 0x5d, 0xbb, 0xeb, 0xfb, 0x7b, 0xd2, 0xf9, 0xe5, 0x37, 0xb7, 0xf6, 0xf4, 0xaf, 0x83, 0x5a,
-	0x70, 0xeb, 0xfc, 0xc2, 0xa9, 0x3d, 0xbb, 0x70, 0x6a, 0xcf, 0x2f, 0x9c, 0xda, 0xd3, 0xb9, 0x63,
-	0x9d, 0xcf, 0x1d, 0xeb, 0xd9, 0xdc, 0xb1, 0x9e, 0xcf, 0x1d, 0xeb, 0xef, 0xb9, 0x63, 0xfd, 0xfc,
-	0x8f, 0x53, 0xfb, 0x76, 0xc7, 0x74, 0xf7, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x39, 0x0e, 0xb6,
-	0xcd, 0x7f, 0x07, 0x00, 0x00,
+	// 824 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0x4d, 0x6f, 0x1b, 0x45,
+	0x18, 0xf6, 0xfa, 0xdb, 0xe3, 0x90, 0x56, 0x23, 0x54, 0x2d, 0x91, 0xba, 0x1b, 0xad, 0x00, 0x85,
+	0x8f, 0xce, 0x92, 0x0a, 0x41, 0x94, 0x03, 0x82, 0x0d, 0x15, 0x44, 0xb4, 0x20, 0x4d, 0x1a, 0x0e,
+	0x08, 0x89, 0x8e, 0xd7, 0x6f, 0x37, 0x53, 0x77, 0x3f, 0xd8, 0x99, 0x35, 0xf8, 0xd6, 0x9f, 0xc0,
+	0x91, 0x0b, 0x12, 0x3f, 0x27, 0x1c, 0x90, 0x7a, 0xec, 0x01, 0x59, 0xc4, 0xdc, 0xf9, 0x01, 0x3d,
+	0xa1, 0x99, 0x1d, 0x7b, 0x8d, 0x23, 0xd7, 0x55, 0x73, 0xdb, 0xf7, 0x79, 0xdf, 0xe7, 0x79, 0x3f,
+	0x67, 0xd1, 0x97, 0xa3, 0x03, 0x41, 0x78, 0xea, 0x8f, 0x8a, 0x01, 0xe4, 0x09, 0x48, 0x10, 0xfe,
+	0x18, 0x92, 0x61, 0x9a, 0xfb, 0xc6, 0xc1, 0x32, 0xee, 0x87, 0x90, 0x4b, 0xfe, 0x90, 0x87, 0x4c,
+	0xbb, 0xf7, 0x07, 0x20, 0xd9, 0xbe, 0x1f, 0x41, 0x02, 0x39, 0x93, 0x30, 0x24, 0x59, 0x9e, 0xca,
+	0x14, 0xbb, 0x25, 0x81, 0xb0, 0x8c, 0x93, 0x65, 0x02, 0x31, 0x84, 0x9d, 0x5b, 0x11, 0x97, 0x67,
+	0xc5, 0x80, 0x84, 0x69, 0xec, 0x47, 0x69, 0x94, 0xfa, 0x9a, 0x37, 0x28, 0x1e, 0x6a, 0x4b, 0x1b,
+	0xfa, 0xab, 0xd4, 0xdb, 0xf9, 0xb0, 0x2a, 0x20, 0x66, 0xe1, 0x19, 0x4f, 0x20, 0x9f, 0xf8, 0xd9,
+	0x28, 0x52, 0x80, 0xf0, 0x63, 0x90, 0xcc, 0x1f, 0x5f, 0xaa, 0x62, 0xc7, 0x5f, 0xc7, 0xca, 0x8b,
+	0x44, 0xf2, 0x18, 0x2e, 0x11, 0x3e, 0xda, 0x44, 0x10, 0xe1, 0x19, 0xc4, 0x6c, 0x95, 0xe7, 0xfd,
+	0x51, 0x47, 0x6f, 0x1c, 0x55, 0x6d, 0x9e, 0xf0, 0x28, 0xe1, 0x49, 0x44, 0xe1, 0xc7, 0x02, 0x84,
+	0xc4, 0x0f, 0x50, 0x57, 0x55, 0x38, 0x64, 0x92, 0xd9, 0xd6, 0xae, 0xb5, 0xd7, 0xbf, 0xfd, 0x01,
+	0xa9, 0xe6, 0xb3, 0x48, 0x44, 0xb2, 0x51, 0xa4, 0x00, 0x41, 0x54, 0x34, 0x19, 0xef, 0x93, 0x6f,
+	0x06, 0x8f, 0x20, 0x94, 0xf7, 0x40, 0xb2, 0x00, 0x9f, 0x4f, 0xdd, 0xda, 0x6c, 0xea, 0xa2, 0x0a,
+	0xa3, 0x0b, 0x55, 0xfc, 0x00, 0x35, 0x45, 0x06, 0xa1, 0x5d, 0xd7, 0xea, 0x9f, 0x90, 0x0d, 0xd3,
+	0x27, 0x6b, 0x6b, 0x3d, 0xc9, 0x20, 0x0c, 0xb6, 0x4c, 0xae, 0xa6, 0xb2, 0xa8, 0x56, 0xc6, 0x67,
+	0xa8, 0x2d, 0x24, 0x93, 0x85, 0xb0, 0x1b, 0x3a, 0xc7, 0xa7, 0x57, 0xc8, 0xa1, 0x75, 0x82, 0x6d,
+	0x93, 0xa5, 0x5d, 0xda, 0xd4, 0xe8, 0x7b, 0xbf, 0xd5, 0x91, 0xb7, 0x96, 0x7b, 0x94, 0x26, 0x43,
+	0x2e, 0x79, 0x9a, 0xe0, 0x03, 0xd4, 0x94, 0x93, 0x0c, 0xf4, 0x40, 0x7b, 0xc1, 0x9b, 0xf3, 0x92,
+	0xef, 0x4f, 0x32, 0x78, 0x3e, 0x75, 0x5f, 0x5f, 0x8d, 0x57, 0x38, 0xd5, 0x0c, 0xfc, 0x36, 0x6a,
+	0xe7, 0xc0, 0x44, 0x9a, 0xe8, 0x71, 0xf5, 0xaa, 0x42, 0xa8, 0x46, 0xa9, 0xf1, 0xe2, 0x77, 0x50,
+	0x27, 0x06, 0x21, 0x58, 0x04, 0xba, 0xe7, 0x5e, 0x70, 0xcd, 0x04, 0x76, 0xee, 0x95, 0x30, 0x9d,
+	0xfb, 0xf1, 0x23, 0xb4, 0xfd, 0x98, 0x09, 0x79, 0x9a, 0x0d, 0x99, 0x84, 0xfb, 0x3c, 0x06, 0xbb,
+	0xa9, 0xa7, 0xf4, 0xee, 0xcb, 0xed, 0x59, 0x31, 0x82, 0x1b, 0x46, 0x7d, 0xfb, 0xee, 0xff, 0x94,
+	0xe8, 0x8a, 0xb2, 0x37, 0xb5, 0xd0, 0xcd, 0xb5, 0xf3, 0xb9, 0xcb, 0x85, 0xc4, 0xdf, 0x5f, 0xba,
+	0x37, 0xf2, 0x72, 0x75, 0x28, 0xb6, 0xbe, 0xb6, 0xeb, 0xa6, 0x96, 0xee, 0x1c, 0x59, 0xba, 0xb5,
+	0x1f, 0x50, 0x8b, 0x4b, 0x88, 0x85, 0x5d, 0xdf, 0x6d, 0xec, 0xf5, 0x6f, 0x1f, 0xbe, 0xfa, 0x21,
+	0x04, 0xaf, 0x99, 0x34, 0xad, 0x63, 0x25, 0x48, 0x4b, 0x5d, 0xef, 0xdf, 0xc6, 0x0b, 0x1a, 0x54,
+	0x27, 0x89, 0xdf, 0x42, 0x9d, 0xbc, 0x34, 0x75, 0x7f, 0x5b, 0x41, 0x5f, 0x6d, 0xc5, 0x44, 0xd0,
+	0xb9, 0x0f, 0x13, 0x84, 0x04, 0x8f, 0x12, 0xc8, 0xbf, 0x66, 0x31, 0xd8, 0x9d, 0x72, 0xd9, 0xea,
+	0x0d, 0x9d, 0x2c, 0x50, 0xba, 0x14, 0x81, 0x09, 0x6a, 0x17, 0x6a, 0x9d, 0xc2, 0x6e, 0xed, 0x36,
+	0xf6, 0x7a, 0xc1, 0x0d, 0x75, 0x14, 0xa7, 0x1a, 0x79, 0x3e, 0x75, 0xbb, 0x5f, 0xc1, 0x44, 0x1b,
+	0xd4, 0x44, 0xe1, 0xf7, 0x51, 0xb7, 0x10, 0x90, 0x27, 0x4a, 0xbd, 0x3c, 0xa5, 0xc5, 0xdc, 0x4e,
+	0x0d, 0x4e, 0x17, 0x11, 0xf8, 0x26, 0x6a, 0x14, 0x7c, 0x68, 0x4e, 0xa9, 0x6f, 0x02, 0x1b, 0xa7,
+	0xc7, 0x9f, 0x53, 0x85, 0x63, 0x0f, 0xb5, 0xa3, 0x3c, 0x2d, 0x32, 0x61, 0x37, 0x75, 0x72, 0xa4,
+	0x92, 0x7f, 0xa1, 0x11, 0x6a, 0x3c, 0x38, 0x41, 0x2d, 0xf8, 0x59, 0xe6, 0xcc, 0x6e, 0xeb, 0xd1,
+	0x1f, 0x5f, 0xed, 0x9d, 0x93, 0x3b, 0x4a, 0xeb, 0x4e, 0x22, 0xf3, 0x49, 0xb5, 0x09, 0x8d, 0xd1,
+	0x32, 0xcd, 0x0e, 0x20, 0x54, 0xc5, 0xe0, 0xeb, 0xa8, 0x31, 0x82, 0x49, 0xf9, 0xe0, 0xa8, 0xfa,
+	0xc4, 0x9f, 0xa1, 0xd6, 0x98, 0x3d, 0x2e, 0xc0, 0xfc, 0x77, 0xde, 0xdb, 0x58, 0x8f, 0x56, 0xfb,
+	0x56, 0x51, 0x68, 0xc9, 0x3c, 0xac, 0x1f, 0x58, 0xde, 0x9f, 0x16, 0x72, 0x37, 0xfc, 0x2d, 0xf0,
+	0x4f, 0x08, 0x85, 0xf3, 0xb7, 0x2c, 0x6c, 0x4b, 0xf7, 0x7f, 0xf4, 0xea, 0xfd, 0x2f, 0xfe, 0x0b,
+	0xd5, 0x8f, 0x75, 0x01, 0x09, 0xba, 0x94, 0x0a, 0xef, 0xa3, 0xfe, 0x92, 0xb4, 0xee, 0x74, 0x2b,
+	0xb8, 0x36, 0x9b, 0xba, 0xfd, 0x25, 0x71, 0xba, 0x1c, 0xe3, 0x7d, 0x6c, 0xc6, 0xa6, 0x1b, 0xc5,
+	0xee, 0xfc, 0xbd, 0x58, 0x7a, 0xaf, 0xbd, 0xd5, 0x7b, 0x3f, 0xec, 0xfe, 0xfa, 0xbb, 0x5b, 0x7b,
+	0xf2, 0xd7, 0x6e, 0x2d, 0xb8, 0x75, 0x7e, 0xe1, 0xd4, 0x9e, 0x5e, 0x38, 0xb5, 0x67, 0x17, 0x4e,
+	0xed, 0xc9, 0xcc, 0xb1, 0xce, 0x67, 0x8e, 0xf5, 0x74, 0xe6, 0x58, 0xcf, 0x66, 0x8e, 0xf5, 0xf7,
+	0xcc, 0xb1, 0x7e, 0xf9, 0xc7, 0xa9, 0x7d, 0xd7, 0x31, 0xdd, 0xfd, 0x17, 0x00, 0x00, 0xff, 0xff,
+	0x69, 0x8d, 0xc8, 0xd3, 0xaf, 0x07, 0x00, 0x00,
 }
 
 func (m *CertificateSigningRequest) Marshal() (dAtA []byte, err error) {
@@ -449,6 +450,13 @@ func (m *CertificateSigningRequestSpec) MarshalToSizedBuffer(dAtA []byte) (int,
 	_ = i
 	var l int
 	_ = l
+	if m.SignerName != nil {
+		i -= len(*m.SignerName)
+		copy(dAtA[i:], *m.SignerName)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SignerName)))
+		i--
+		dAtA[i] = 0x3a
+	}
 	if len(m.Extra) > 0 {
 		keysForExtra := make([]string, 0, len(m.Extra))
 		for k := range m.Extra {
@@ -687,6 +695,10 @@ func (m *CertificateSigningRequestSpec) Size() (n int) {
 			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
 		}
 	}
+	if m.SignerName != nil {
+		l = len(*m.SignerName)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
 	return n
 }
 
@@ -792,6 +804,7 @@ func (this *CertificateSigningRequestSpec) String() string {
 		`Groups:` + fmt.Sprintf("%v", this.Groups) + `,`,
 		`Usages:` + fmt.Sprintf("%v", this.Usages) + `,`,
 		`Extra:` + mapStringForExtra + `,`,
+		`SignerName:` + valueToStringGenerated(this.SignerName) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1594,6 +1607,39 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error {
 			}
 			m.Extra[mapkey] = *mapvalue
 			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SignerName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.SignerName = &s
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -1827,6 +1873,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error {
 func skipGenerated(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
+	depth := 0
 	for iNdEx < l {
 		var wire uint64
 		for shift := uint(0); ; shift += 7 {
@@ -1858,10 +1905,8 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			return iNdEx, nil
 		case 1:
 			iNdEx += 8
-			return iNdEx, nil
 		case 2:
 			var length int
 			for shift := uint(0); ; shift += 7 {
@@ -1882,55 +1927,30 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 				return 0, ErrInvalidLengthGenerated
 			}
 			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthGenerated
-			}
-			return iNdEx, nil
 		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowGenerated
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipGenerated(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthGenerated
-				}
-			}
-			return iNdEx, nil
+			depth++
 		case 4:
-			return iNdEx, nil
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
 		case 5:
 			iNdEx += 4
-			return iNdEx, nil
 		default:
 			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
 		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
 	}
-	panic("unreachable")
+	return 0, io.ErrUnexpectedEOF
 }
 
 var (
-	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
 )
diff --git a/vendor/k8s.io/api/certificates/v1beta1/generated.proto b/vendor/k8s.io/api/certificates/v1beta1/generated.proto
index 5200224a..78d2dbc7 100644
--- a/vendor/k8s.io/api/certificates/v1beta1/generated.proto
+++ b/vendor/k8s.io/api/certificates/v1beta1/generated.proto
@@ -73,6 +73,19 @@ message CertificateSigningRequestSpec {
   // Base64-encoded PKCS#10 CSR data
   optional bytes request = 1;
 
+  // Requested signer for the request. It is a qualified name in the form:
+  // `scope-hostname.io/name`.
+  // If empty, it will be defaulted:
+  //  1. If it's a kubelet client certificate, it is assigned
+  //     "kubernetes.io/kube-apiserver-client-kubelet".
+  //  2. If it's a kubelet serving certificate, it is assigned
+  //     "kubernetes.io/kubelet-serving".
+  //  3. Otherwise, it is assigned "kubernetes.io/legacy-unknown".
+  // Distribution of trust for signers happens out of band.
+  // You can select on this field using `spec.signerName`.
+  // +optional
+  optional string signerName = 7;
+
   // allowedUsages specifies a set of usage contexts the key will be
   // valid for.
   // See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3
diff --git a/vendor/k8s.io/api/certificates/v1beta1/types.go b/vendor/k8s.io/api/certificates/v1beta1/types.go
index 93f81cd5..5a46e634 100644
--- a/vendor/k8s.io/api/certificates/v1beta1/types.go
+++ b/vendor/k8s.io/api/certificates/v1beta1/types.go
@@ -48,6 +48,19 @@ type CertificateSigningRequestSpec struct {
 	// Base64-encoded PKCS#10 CSR data
 	Request []byte `json:"request" protobuf:"bytes,1,opt,name=request"`
 
+	// Requested signer for the request. It is a qualified name in the form:
+	// `scope-hostname.io/name`.
+	// If empty, it will be defaulted:
+	//  1. If it's a kubelet client certificate, it is assigned
+	//     "kubernetes.io/kube-apiserver-client-kubelet".
+	//  2. If it's a kubelet serving certificate, it is assigned
+	//     "kubernetes.io/kubelet-serving".
+	//  3. Otherwise, it is assigned "kubernetes.io/legacy-unknown".
+	// Distribution of trust for signers happens out of band.
+	// You can select on this field using `spec.signerName`.
+	// +optional
+	SignerName *string `json:"signerName,omitempty" protobuf:"bytes,7,opt,name=signerName"`
+
 	// allowedUsages specifies a set of usage contexts the key will be
 	// valid for.
 	// See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3
@@ -72,6 +85,28 @@ type CertificateSigningRequestSpec struct {
 	Extra map[string]ExtraValue `json:"extra,omitempty" protobuf:"bytes,6,rep,name=extra"`
 }
 
+// Built in signerName values that are honoured by kube-controller-manager.
+// None of these usages are related to ServiceAccount token secrets
+// `.data[ca.crt]` in any way.
+const (
+	// Signs certificates that will be honored as client-certs by the
+	// kube-apiserver. Never auto-approved by kube-controller-manager.
+	KubeAPIServerClientSignerName = "kubernetes.io/kube-apiserver-client"
+
+	// Signs client certificates that will be honored as client-certs by the
+	// kube-apiserver for a kubelet.
+	// May be auto-approved by kube-controller-manager.
+	KubeAPIServerClientKubeletSignerName = "kubernetes.io/kube-apiserver-client-kubelet"
+
+	// Signs serving certificates that are honored as a valid kubelet serving
+	// certificate by the kube-apiserver, but has no other guarantees.
+	KubeletServingSignerName = "kubernetes.io/kubelet-serving"
+
+	// Has no guarantees for trust at all. Some distributions may honor these
+	// as client certs, but that behavior is not standard kubernetes behavior.
+	LegacyUnknownSignerName = "kubernetes.io/legacy-unknown"
+)
+
 // ExtraValue masks the value so protobuf can generate
 // +protobuf.nullable=true
 // +protobuf.options.(gogoproto.goproto_stringer)=false
diff --git a/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go
index f6a7e16a..a2edb45a 100644
--- a/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go
@@ -49,13 +49,14 @@ func (CertificateSigningRequestCondition) SwaggerDoc() map[string]string {
 }
 
 var map_CertificateSigningRequestSpec = map[string]string{
-	"":         "This information is immutable after the request is created. Only the Request and Usages fields can be set on creation, other fields are derived by Kubernetes and cannot be modified by users.",
-	"request":  "Base64-encoded PKCS#10 CSR data",
-	"usages":   "allowedUsages specifies a set of usage contexts the key will be valid for. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3\n     https://tools.ietf.org/html/rfc5280#section-4.2.1.12",
-	"username": "Information about the requesting user. See user.Info interface for details.",
-	"uid":      "UID information about the requesting user. See user.Info interface for details.",
-	"groups":   "Group information about the requesting user. See user.Info interface for details.",
-	"extra":    "Extra information about the requesting user. See user.Info interface for details.",
+	"":           "This information is immutable after the request is created. Only the Request and Usages fields can be set on creation, other fields are derived by Kubernetes and cannot be modified by users.",
+	"request":    "Base64-encoded PKCS#10 CSR data",
+	"signerName": "Requested signer for the request. It is a qualified name in the form: `scope-hostname.io/name`. If empty, it will be defaulted:\n 1. If it's a kubelet client certificate, it is assigned\n    \"kubernetes.io/kube-apiserver-client-kubelet\".\n 2. If it's a kubelet serving certificate, it is assigned\n    \"kubernetes.io/kubelet-serving\".\n 3. Otherwise, it is assigned \"kubernetes.io/legacy-unknown\".\nDistribution of trust for signers happens out of band. You can select on this field using `spec.signerName`.",
+	"usages":     "allowedUsages specifies a set of usage contexts the key will be valid for. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3\n     https://tools.ietf.org/html/rfc5280#section-4.2.1.12",
+	"username":   "Information about the requesting user. See user.Info interface for details.",
+	"uid":        "UID information about the requesting user. See user.Info interface for details.",
+	"groups":     "Group information about the requesting user. See user.Info interface for details.",
+	"extra":      "Extra information about the requesting user. See user.Info interface for details.",
 }
 
 func (CertificateSigningRequestSpec) SwaggerDoc() map[string]string {
diff --git a/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go
index b3e0aeb5..11d0f77d 100644
--- a/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go
@@ -110,6 +110,11 @@ func (in *CertificateSigningRequestSpec) DeepCopyInto(out *CertificateSigningReq
 		*out = make([]byte, len(*in))
 		copy(*out, *in)
 	}
+	if in.SignerName != nil {
+		in, out := &in.SignerName, &out.SignerName
+		*out = new(string)
+		**out = **in
+	}
 	if in.Usages != nil {
 		in, out := &in.Usages, &out.Usages
 		*out = make([]KeyUsage, len(*in))
diff --git a/vendor/k8s.io/api/coordination/v1/generated.pb.go b/vendor/k8s.io/api/coordination/v1/generated.pb.go
index 7e78be19..22c3d624 100644
--- a/vendor/k8s.io/api/coordination/v1/generated.pb.go
+++ b/vendor/k8s.io/api/coordination/v1/generated.pb.go
@@ -42,7 +42,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 func (m *Lease) Reset()      { *m = Lease{} }
 func (*Lease) ProtoMessage() {}
@@ -893,6 +893,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error {
 func skipGenerated(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
+	depth := 0
 	for iNdEx < l {
 		var wire uint64
 		for shift := uint(0); ; shift += 7 {
@@ -924,10 +925,8 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			return iNdEx, nil
 		case 1:
 			iNdEx += 8
-			return iNdEx, nil
 		case 2:
 			var length int
 			for shift := uint(0); ; shift += 7 {
@@ -948,55 +947,30 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 				return 0, ErrInvalidLengthGenerated
 			}
 			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthGenerated
-			}
-			return iNdEx, nil
 		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowGenerated
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipGenerated(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthGenerated
-				}
-			}
-			return iNdEx, nil
+			depth++
 		case 4:
-			return iNdEx, nil
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
 		case 5:
 			iNdEx += 4
-			return iNdEx, nil
 		default:
 			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
 		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
 	}
-	panic("unreachable")
+	return 0, io.ErrUnexpectedEOF
 }
 
 var (
-	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
 )
diff --git a/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go b/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go
index 2463d625..57a314cf 100644
--- a/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go
+++ b/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go
@@ -42,7 +42,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 func (m *Lease) Reset()      { *m = Lease{} }
 func (*Lease) ProtoMessage() {}
@@ -893,6 +893,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error {
 func skipGenerated(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
+	depth := 0
 	for iNdEx < l {
 		var wire uint64
 		for shift := uint(0); ; shift += 7 {
@@ -924,10 +925,8 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			return iNdEx, nil
 		case 1:
 			iNdEx += 8
-			return iNdEx, nil
 		case 2:
 			var length int
 			for shift := uint(0); ; shift += 7 {
@@ -948,55 +947,30 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 				return 0, ErrInvalidLengthGenerated
 			}
 			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthGenerated
-			}
-			return iNdEx, nil
 		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowGenerated
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipGenerated(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthGenerated
-				}
-			}
-			return iNdEx, nil
+			depth++
 		case 4:
-			return iNdEx, nil
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
 		case 5:
 			iNdEx += 4
-			return iNdEx, nil
 		default:
 			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
 		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
 	}
-	panic("unreachable")
+	return 0, io.ErrUnexpectedEOF
 }
 
 var (
-	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
 )
diff --git a/vendor/k8s.io/api/core/v1/generated.pb.go b/vendor/k8s.io/api/core/v1/generated.pb.go
index 732385ce..8e587520 100644
--- a/vendor/k8s.io/api/core/v1/generated.pb.go
+++ b/vendor/k8s.io/api/core/v1/generated.pb.go
@@ -47,7 +47,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 func (m *AWSElasticBlockStoreVolumeSource) Reset()      { *m = AWSElasticBlockStoreVolumeSource{} }
 func (*AWSElasticBlockStoreVolumeSource) ProtoMessage() {}
@@ -6000,859 +6000,865 @@ func init() {
 }
 
 var fileDescriptor_83c10c24ec417dc9 = []byte{
-	// 13620 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x6b, 0x70, 0x24, 0x59,
-	0x5a, 0x18, 0xba, 0x59, 0xa5, 0x47, 0xd5, 0xa7, 0xf7, 0xe9, 0xc7, 0xa8, 0x35, 0xdd, 0xad, 0x9e,
-	0x9c, 0xdd, 0x9e, 0x9e, 0x9d, 0x19, 0xf5, 0xce, 0x6b, 0x67, 0x98, 0x99, 0x1d, 0x90, 0x54, 0x52,
-	0x77, 0x4d, 0xb7, 0xd4, 0x35, 0xa7, 0xd4, 0xdd, 0xbb, 0xc3, 0xec, 0xde, 0x4d, 0x55, 0x1e, 0x49,
-	0x39, 0x2a, 0x65, 0xd6, 0x64, 0x66, 0x49, 0xad, 0xb9, 0x10, 0x97, 0xbb, 0x3c, 0xf7, 0x02, 0x37,
-	0x36, 0x6c, 0xc2, 0x0f, 0x20, 0xb0, 0x03, 0xe3, 0x00, 0x0c, 0x76, 0x18, 0x83, 0x01, 0xef, 0x62,
-	0x1b, 0x83, 0xed, 0xc0, 0xfe, 0x81, 0xb1, 0xc3, 0xf6, 0x12, 0x41, 0x58, 0x86, 0xc6, 0x61, 0x62,
-	0x7f, 0x18, 0x08, 0x83, 0x7f, 0x58, 0x26, 0x8c, 0xe3, 0x3c, 0xf3, 0x9c, 0xac, 0xcc, 0xaa, 0x52,
-	0x8f, 0x5a, 0x3b, 0x6c, 0xcc, 0xbf, 0xaa, 0xf3, 0x7d, 0xe7, 0x3b, 0x27, 0xcf, 0xf3, 0x3b, 0xdf,
-	0x13, 0x5e, 0xdd, 0x7e, 0x39, 0x9a, 0xf3, 0x82, 0xab, 0xdb, 0xed, 0x75, 0x12, 0xfa, 0x24, 0x26,
-	0xd1, 0xd5, 0x5d, 0xe2, 0xbb, 0x41, 0x78, 0x55, 0x00, 0x9c, 0x96, 0x77, 0xb5, 0x11, 0x84, 0xe4,
-	0xea, 0xee, 0xb3, 0x57, 0x37, 0x89, 0x4f, 0x42, 0x27, 0x26, 0xee, 0x5c, 0x2b, 0x0c, 0xe2, 0x00,
-	0x21, 0x8e, 0x33, 0xe7, 0xb4, 0xbc, 0x39, 0x8a, 0x33, 0xb7, 0xfb, 0xec, 0xcc, 0x33, 0x9b, 0x5e,
-	0xbc, 0xd5, 0x5e, 0x9f, 0x6b, 0x04, 0x3b, 0x57, 0x37, 0x83, 0xcd, 0xe0, 0x2a, 0x43, 0x5d, 0x6f,
-	0x6f, 0xb0, 0x7f, 0xec, 0x0f, 0xfb, 0xc5, 0x49, 0xcc, 0xbc, 0x90, 0x34, 0xb3, 0xe3, 0x34, 0xb6,
-	0x3c, 0x9f, 0x84, 0xfb, 0x57, 0x5b, 0xdb, 0x9b, 0xac, 0xdd, 0x90, 0x44, 0x41, 0x3b, 0x6c, 0x90,
-	0x74, 0xc3, 0x5d, 0x6b, 0x45, 0x57, 0x77, 0x48, 0xec, 0x64, 0x74, 0x77, 0xe6, 0x6a, 0x5e, 0xad,
-	0xb0, 0xed, 0xc7, 0xde, 0x4e, 0x67, 0x33, 0x9f, 0xec, 0x55, 0x21, 0x6a, 0x6c, 0x91, 0x1d, 0xa7,
-	0xa3, 0xde, 0xf3, 0x79, 0xf5, 0xda, 0xb1, 0xd7, 0xbc, 0xea, 0xf9, 0x71, 0x14, 0x87, 0xe9, 0x4a,
-	0xf6, 0x57, 0x2d, 0xb8, 0x34, 0x7f, 0xb7, 0xbe, 0xd4, 0x74, 0xa2, 0xd8, 0x6b, 0x2c, 0x34, 0x83,
-	0xc6, 0x76, 0x3d, 0x0e, 0x42, 0x72, 0x27, 0x68, 0xb6, 0x77, 0x48, 0x9d, 0x0d, 0x04, 0x7a, 0x1a,
-	0x4a, 0xbb, 0xec, 0x7f, 0xb5, 0x32, 0x6d, 0x5d, 0xb2, 0xae, 0x94, 0x17, 0x26, 0x7f, 0xe3, 0x60,
-	0xf6, 0x23, 0xf7, 0x0f, 0x66, 0x4b, 0x77, 0x44, 0x39, 0x56, 0x18, 0xe8, 0x32, 0x0c, 0x6d, 0x44,
-	0x6b, 0xfb, 0x2d, 0x32, 0x5d, 0x60, 0xb8, 0xe3, 0x02, 0x77, 0x68, 0xb9, 0x4e, 0x4b, 0xb1, 0x80,
-	0xa2, 0xab, 0x50, 0x6e, 0x39, 0x61, 0xec, 0xc5, 0x5e, 0xe0, 0x4f, 0x17, 0x2f, 0x59, 0x57, 0x06,
-	0x17, 0xa6, 0x04, 0x6a, 0xb9, 0x26, 0x01, 0x38, 0xc1, 0xa1, 0xdd, 0x08, 0x89, 0xe3, 0xde, 0xf2,
-	0x9b, 0xfb, 0xd3, 0x03, 0x97, 0xac, 0x2b, 0xa5, 0xa4, 0x1b, 0x58, 0x94, 0x63, 0x85, 0x61, 0xff,
-	0x70, 0x01, 0x4a, 0xf3, 0x1b, 0x1b, 0x9e, 0xef, 0xc5, 0xfb, 0xe8, 0x0e, 0x8c, 0xfa, 0x81, 0x4b,
-	0xe4, 0x7f, 0xf6, 0x15, 0x23, 0xcf, 0x5d, 0x9a, 0xeb, 0x5c, 0x4a, 0x73, 0xab, 0x1a, 0xde, 0xc2,
-	0xe4, 0xfd, 0x83, 0xd9, 0x51, 0xbd, 0x04, 0x1b, 0x74, 0x10, 0x86, 0x91, 0x56, 0xe0, 0x2a, 0xb2,
-	0x05, 0x46, 0x76, 0x36, 0x8b, 0x6c, 0x2d, 0x41, 0x5b, 0x98, 0xb8, 0x7f, 0x30, 0x3b, 0xa2, 0x15,
-	0x60, 0x9d, 0x08, 0x5a, 0x87, 0x09, 0xfa, 0xd7, 0x8f, 0x3d, 0x45, 0xb7, 0xc8, 0xe8, 0x3e, 0x9e,
-	0x47, 0x57, 0x43, 0x5d, 0x38, 0x75, 0xff, 0x60, 0x76, 0x22, 0x55, 0x88, 0xd3, 0x04, 0xed, 0xf7,
-	0x60, 0x7c, 0x3e, 0x8e, 0x9d, 0xc6, 0x16, 0x71, 0xf9, 0x0c, 0xa2, 0x17, 0x60, 0xc0, 0x77, 0x76,
-	0x88, 0x98, 0xdf, 0x4b, 0x62, 0x60, 0x07, 0x56, 0x9d, 0x1d, 0x72, 0x78, 0x30, 0x3b, 0x79, 0xdb,
-	0xf7, 0xde, 0x6d, 0x8b, 0x55, 0x41, 0xcb, 0x30, 0xc3, 0x46, 0xcf, 0x01, 0xb8, 0x64, 0xd7, 0x6b,
-	0x90, 0x9a, 0x13, 0x6f, 0x89, 0xf9, 0x46, 0xa2, 0x2e, 0x54, 0x14, 0x04, 0x6b, 0x58, 0xf6, 0x3d,
-	0x28, 0xcf, 0xef, 0x06, 0x9e, 0x5b, 0x0b, 0xdc, 0x08, 0x6d, 0xc3, 0x44, 0x2b, 0x24, 0x1b, 0x24,
-	0x54, 0x45, 0xd3, 0xd6, 0xa5, 0xe2, 0x95, 0x91, 0xe7, 0xae, 0x64, 0x7e, 0xac, 0x89, 0xba, 0xe4,
-	0xc7, 0xe1, 0xfe, 0xc2, 0x23, 0xa2, 0xbd, 0x89, 0x14, 0x14, 0xa7, 0x29, 0xdb, 0xff, 0xbc, 0x00,
-	0x67, 0xe6, 0xdf, 0x6b, 0x87, 0xa4, 0xe2, 0x45, 0xdb, 0xe9, 0x15, 0xee, 0x7a, 0xd1, 0xf6, 0x6a,
-	0x32, 0x02, 0x6a, 0x69, 0x55, 0x44, 0x39, 0x56, 0x18, 0xe8, 0x19, 0x18, 0xa6, 0xbf, 0x6f, 0xe3,
-	0xaa, 0xf8, 0xe4, 0x53, 0x02, 0x79, 0xa4, 0xe2, 0xc4, 0x4e, 0x85, 0x83, 0xb0, 0xc4, 0x41, 0x2b,
-	0x30, 0xd2, 0x60, 0x1b, 0x72, 0x73, 0x25, 0x70, 0x09, 0x9b, 0xcc, 0xf2, 0xc2, 0x53, 0x14, 0x7d,
-	0x31, 0x29, 0x3e, 0x3c, 0x98, 0x9d, 0xe6, 0x7d, 0x13, 0x24, 0x34, 0x18, 0xd6, 0xeb, 0x23, 0x5b,
-	0xed, 0xaf, 0x01, 0x46, 0x09, 0x32, 0xf6, 0xd6, 0x15, 0x6d, 0xab, 0x0c, 0xb2, 0xad, 0x32, 0x9a,
-	0xbd, 0x4d, 0xd0, 0xb3, 0x30, 0xb0, 0xed, 0xf9, 0xee, 0xf4, 0x10, 0xa3, 0x75, 0x81, 0xce, 0xf9,
-	0x0d, 0xcf, 0x77, 0x0f, 0x0f, 0x66, 0xa7, 0x8c, 0xee, 0xd0, 0x42, 0xcc, 0x50, 0xed, 0x3f, 0xb1,
-	0x60, 0x96, 0xc1, 0x96, 0xbd, 0x26, 0xa9, 0x91, 0x30, 0xf2, 0xa2, 0x98, 0xf8, 0xb1, 0x31, 0xa0,
-	0xcf, 0x01, 0x44, 0xa4, 0x11, 0x92, 0x58, 0x1b, 0x52, 0xb5, 0x30, 0xea, 0x0a, 0x82, 0x35, 0x2c,
-	0x7a, 0x20, 0x44, 0x5b, 0x4e, 0xc8, 0xd6, 0x97, 0x18, 0x58, 0x75, 0x20, 0xd4, 0x25, 0x00, 0x27,
-	0x38, 0xc6, 0x81, 0x50, 0xec, 0x75, 0x20, 0xa0, 0x4f, 0xc1, 0x44, 0xd2, 0x58, 0xd4, 0x72, 0x1a,
-	0x72, 0x00, 0xd9, 0x96, 0xa9, 0x9b, 0x20, 0x9c, 0xc6, 0xb5, 0xff, 0x8e, 0x25, 0x16, 0x0f, 0xfd,
-	0xea, 0x0f, 0xf8, 0xb7, 0xda, 0xbf, 0x6c, 0xc1, 0xf0, 0x82, 0xe7, 0xbb, 0x9e, 0xbf, 0x89, 0x3e,
-	0x0f, 0x25, 0x7a, 0x37, 0xb9, 0x4e, 0xec, 0x88, 0x73, 0xef, 0x13, 0xda, 0xde, 0x52, 0x57, 0xc5,
-	0x5c, 0x6b, 0x7b, 0x93, 0x16, 0x44, 0x73, 0x14, 0x9b, 0xee, 0xb6, 0x5b, 0xeb, 0xef, 0x90, 0x46,
-	0xbc, 0x42, 0x62, 0x27, 0xf9, 0x9c, 0xa4, 0x0c, 0x2b, 0xaa, 0xe8, 0x06, 0x0c, 0xc5, 0x4e, 0xb8,
-	0x49, 0x62, 0x71, 0x00, 0x66, 0x1e, 0x54, 0xbc, 0x26, 0xa6, 0x3b, 0x92, 0xf8, 0x0d, 0x92, 0x5c,
-	0x0b, 0x6b, 0xac, 0x2a, 0x16, 0x24, 0xec, 0x1f, 0x1c, 0x86, 0x73, 0x8b, 0xf5, 0x6a, 0xce, 0xba,
-	0xba, 0x0c, 0x43, 0x6e, 0xe8, 0xed, 0x92, 0x50, 0x8c, 0xb3, 0xa2, 0x52, 0x61, 0xa5, 0x58, 0x40,
-	0xd1, 0xcb, 0x30, 0xca, 0x2f, 0xa4, 0xeb, 0x8e, 0xef, 0x36, 0xe5, 0x10, 0x9f, 0x16, 0xd8, 0xa3,
-	0x77, 0x34, 0x18, 0x36, 0x30, 0x8f, 0xb8, 0xa8, 0x2e, 0xa7, 0x36, 0x63, 0xde, 0x65, 0xf7, 0x45,
-	0x0b, 0x26, 0x79, 0x33, 0xf3, 0x71, 0x1c, 0x7a, 0xeb, 0xed, 0x98, 0x44, 0xd3, 0x83, 0xec, 0xa4,
-	0x5b, 0xcc, 0x1a, 0xad, 0xdc, 0x11, 0x98, 0xbb, 0x93, 0xa2, 0xc2, 0x0f, 0xc1, 0x69, 0xd1, 0xee,
-	0x64, 0x1a, 0x8c, 0x3b, 0x9a, 0x45, 0xdf, 0x69, 0xc1, 0x4c, 0x23, 0xf0, 0xe3, 0x30, 0x68, 0x36,
-	0x49, 0x58, 0x6b, 0xaf, 0x37, 0xbd, 0x68, 0x8b, 0xaf, 0x53, 0x4c, 0x36, 0xd8, 0x49, 0x90, 0x33,
-	0x87, 0x0a, 0x49, 0xcc, 0xe1, 0xc5, 0xfb, 0x07, 0xb3, 0x33, 0x8b, 0xb9, 0xa4, 0x70, 0x97, 0x66,
-	0xd0, 0x36, 0x20, 0x7a, 0x95, 0xd6, 0x63, 0x67, 0x93, 0x24, 0x8d, 0x0f, 0xf7, 0xdf, 0xf8, 0xd9,
-	0xfb, 0x07, 0xb3, 0x68, 0xb5, 0x83, 0x04, 0xce, 0x20, 0x8b, 0xde, 0x85, 0xd3, 0xb4, 0xb4, 0xe3,
-	0x5b, 0x4b, 0xfd, 0x37, 0x37, 0x7d, 0xff, 0x60, 0xf6, 0xf4, 0x6a, 0x06, 0x11, 0x9c, 0x49, 0x1a,
-	0x7d, 0x87, 0x05, 0xe7, 0x92, 0xcf, 0x5f, 0xba, 0xd7, 0x72, 0x7c, 0x37, 0x69, 0xb8, 0xdc, 0x7f,
-	0xc3, 0xf4, 0x4c, 0x3e, 0xb7, 0x98, 0x47, 0x09, 0xe7, 0x37, 0x32, 0xb3, 0x08, 0x67, 0x32, 0x57,
-	0x0b, 0x9a, 0x84, 0xe2, 0x36, 0xe1, 0x5c, 0x50, 0x19, 0xd3, 0x9f, 0xe8, 0x34, 0x0c, 0xee, 0x3a,
-	0xcd, 0xb6, 0xd8, 0x28, 0x98, 0xff, 0x79, 0xa5, 0xf0, 0xb2, 0x65, 0xff, 0x8b, 0x22, 0x4c, 0x2c,
-	0xd6, 0xab, 0x0f, 0xb4, 0x0b, 0xf5, 0x6b, 0xa8, 0xd0, 0xf5, 0x1a, 0x4a, 0x2e, 0xb5, 0x62, 0xee,
-	0xa5, 0xf6, 0xff, 0x64, 0x6c, 0xa1, 0x01, 0xb6, 0x85, 0xbe, 0x29, 0x67, 0x0b, 0x1d, 0xf3, 0xc6,
-	0xd9, 0xcd, 0x59, 0x45, 0x83, 0x6c, 0x32, 0x33, 0x39, 0x96, 0x9b, 0x41, 0xc3, 0x69, 0xa6, 0x8f,
-	0xbe, 0x23, 0x2e, 0xa5, 0xe3, 0x99, 0xc7, 0x06, 0x8c, 0x2e, 0x3a, 0x2d, 0x67, 0xdd, 0x6b, 0x7a,
-	0xb1, 0x47, 0x22, 0xf4, 0x04, 0x14, 0x1d, 0xd7, 0x65, 0xdc, 0x56, 0x79, 0xe1, 0xcc, 0xfd, 0x83,
-	0xd9, 0xe2, 0xbc, 0x4b, 0xaf, 0x7d, 0x50, 0x58, 0xfb, 0x98, 0x62, 0xa0, 0x8f, 0xc3, 0x80, 0x1b,
-	0x06, 0xad, 0xe9, 0x02, 0xc3, 0xa4, 0xbb, 0x6e, 0xa0, 0x12, 0x06, 0xad, 0x14, 0x2a, 0xc3, 0xb1,
-	0x7f, 0xb5, 0x00, 0xe7, 0x17, 0x49, 0x6b, 0x6b, 0xb9, 0x9e, 0x73, 0x7e, 0x5f, 0x81, 0xd2, 0x4e,
-	0xe0, 0x7b, 0x71, 0x10, 0x46, 0xa2, 0x69, 0xb6, 0x22, 0x56, 0x44, 0x19, 0x56, 0x50, 0x74, 0x09,
-	0x06, 0x5a, 0x09, 0x53, 0x39, 0x2a, 0x19, 0x52, 0xc6, 0x4e, 0x32, 0x08, 0xc5, 0x68, 0x47, 0x24,
-	0x14, 0x2b, 0x46, 0x61, 0xdc, 0x8e, 0x48, 0x88, 0x19, 0x24, 0xb9, 0x99, 0xe9, 0x9d, 0x2d, 0x4e,
-	0xe8, 0xd4, 0xcd, 0x4c, 0x21, 0x58, 0xc3, 0x42, 0x35, 0x28, 0x47, 0xa9, 0x99, 0xed, 0x6b, 0x9b,
-	0x8e, 0xb1, 0xab, 0x5b, 0xcd, 0x64, 0x42, 0xc4, 0xb8, 0x51, 0x86, 0x7a, 0x5e, 0xdd, 0x5f, 0x29,
-	0x00, 0xe2, 0x43, 0xf8, 0x17, 0x6c, 0xe0, 0x6e, 0x77, 0x0e, 0x5c, 0xff, 0x5b, 0xe2, 0xb8, 0x46,
-	0xef, 0x4f, 0x2d, 0x38, 0xbf, 0xe8, 0xf9, 0x2e, 0x09, 0x73, 0x16, 0xe0, 0xc3, 0x79, 0xcb, 0x1e,
-	0x8d, 0x69, 0x30, 0x96, 0xd8, 0xc0, 0x31, 0x2c, 0x31, 0xfb, 0x8f, 0x2c, 0x40, 0xfc, 0xb3, 0x3f,
-	0x70, 0x1f, 0x7b, 0xbb, 0xf3, 0x63, 0x8f, 0x61, 0x59, 0xd8, 0x37, 0x61, 0x7c, 0xb1, 0xe9, 0x11,
-	0x3f, 0xae, 0xd6, 0x16, 0x03, 0x7f, 0xc3, 0xdb, 0x44, 0xaf, 0xc0, 0x78, 0xec, 0xed, 0x90, 0xa0,
-	0x1d, 0xd7, 0x49, 0x23, 0xf0, 0xd9, 0x4b, 0xd2, 0xba, 0x32, 0xb8, 0x80, 0xee, 0x1f, 0xcc, 0x8e,
-	0xaf, 0x19, 0x10, 0x9c, 0xc2, 0xb4, 0x7f, 0x87, 0x8e, 0x5f, 0xb0, 0xd3, 0x0a, 0x7c, 0xe2, 0xc7,
-	0x8b, 0x81, 0xef, 0x72, 0x89, 0xc3, 0x2b, 0x30, 0x10, 0xd3, 0xf1, 0xe0, 0x63, 0x77, 0x59, 0x6e,
-	0x14, 0x3a, 0x0a, 0x87, 0x07, 0xb3, 0x67, 0x3b, 0x6b, 0xb0, 0x71, 0x62, 0x75, 0xd0, 0x37, 0xc1,
-	0x50, 0x14, 0x3b, 0x71, 0x3b, 0x12, 0xa3, 0xf9, 0x98, 0x1c, 0xcd, 0x3a, 0x2b, 0x3d, 0x3c, 0x98,
-	0x9d, 0x50, 0xd5, 0x78, 0x11, 0x16, 0x15, 0xd0, 0x93, 0x30, 0xbc, 0x43, 0xa2, 0xc8, 0xd9, 0x94,
-	0xb7, 0xe1, 0x84, 0xa8, 0x3b, 0xbc, 0xc2, 0x8b, 0xb1, 0x84, 0xa3, 0xc7, 0x61, 0x90, 0x84, 0x61,
-	0x10, 0x8a, 0x3d, 0x3a, 0x26, 0x10, 0x07, 0x97, 0x68, 0x21, 0xe6, 0x30, 0xfb, 0xdf, 0x58, 0x30,
-	0xa1, 0xfa, 0xca, 0xdb, 0x3a, 0x81, 0x57, 0xc1, 0x5b, 0x00, 0x0d, 0xf9, 0x81, 0x11, 0xbb, 0x3d,
-	0x46, 0x9e, 0xbb, 0x9c, 0x79, 0x51, 0x77, 0x0c, 0x63, 0x42, 0x59, 0x15, 0x45, 0x58, 0xa3, 0x66,
-	0xff, 0x63, 0x0b, 0x4e, 0xa5, 0xbe, 0xe8, 0xa6, 0x17, 0xc5, 0xe8, 0xed, 0x8e, 0xaf, 0x9a, 0xeb,
-	0xef, 0xab, 0x68, 0x6d, 0xf6, 0x4d, 0x6a, 0x29, 0xcb, 0x12, 0xed, 0x8b, 0xae, 0xc3, 0xa0, 0x17,
-	0x93, 0x1d, 0xf9, 0x31, 0x8f, 0x77, 0xfd, 0x18, 0xde, 0xab, 0x64, 0x46, 0xaa, 0xb4, 0x26, 0xe6,
-	0x04, 0xec, 0xbf, 0x5c, 0x84, 0x32, 0x5f, 0xb6, 0x2b, 0x4e, 0xeb, 0x04, 0xe6, 0xa2, 0x0a, 0x03,
-	0x8c, 0x3a, 0xef, 0xf8, 0x13, 0xd9, 0x1d, 0x17, 0xdd, 0x99, 0xa3, 0x4f, 0x7e, 0xce, 0x1c, 0xa9,
-	0xab, 0x81, 0x16, 0x61, 0x46, 0x02, 0x39, 0x00, 0xeb, 0x9e, 0xef, 0x84, 0xfb, 0xb4, 0x6c, 0xba,
-	0xc8, 0x08, 0x3e, 0xd3, 0x9d, 0xe0, 0x82, 0xc2, 0xe7, 0x64, 0x55, 0x5f, 0x13, 0x00, 0xd6, 0x88,
-	0xce, 0xbc, 0x04, 0x65, 0x85, 0x7c, 0x14, 0x1e, 0x67, 0xe6, 0x53, 0x30, 0x91, 0x6a, 0xab, 0x57,
-	0xf5, 0x51, 0x9d, 0x45, 0xfa, 0x32, 0x3b, 0x05, 0x44, 0xaf, 0x97, 0xfc, 0x5d, 0x71, 0x8a, 0xbe,
-	0x07, 0xa7, 0x9b, 0x19, 0x87, 0x93, 0x98, 0xaa, 0xfe, 0x0f, 0xb3, 0xf3, 0xe2, 0xb3, 0x4f, 0x67,
-	0x41, 0x71, 0x66, 0x1b, 0xf4, 0xda, 0x0f, 0x5a, 0x74, 0xcd, 0x3b, 0x4d, 0x9d, 0x83, 0xbe, 0x25,
-	0xca, 0xb0, 0x82, 0xd2, 0x23, 0xec, 0xb4, 0xea, 0xfc, 0x0d, 0xb2, 0x5f, 0x27, 0x4d, 0xd2, 0x88,
-	0x83, 0xf0, 0xeb, 0xda, 0xfd, 0x0b, 0x7c, 0xf4, 0xf9, 0x09, 0x38, 0x22, 0x08, 0x14, 0x6f, 0x90,
-	0x7d, 0x3e, 0x15, 0xfa, 0xd7, 0x15, 0xbb, 0x7e, 0xdd, 0xcf, 0x59, 0x30, 0xa6, 0xbe, 0xee, 0x04,
-	0xb6, 0xfa, 0x82, 0xb9, 0xd5, 0x2f, 0x74, 0x5d, 0xe0, 0x39, 0x9b, 0xfc, 0x2b, 0x05, 0x38, 0xa7,
-	0x70, 0x28, 0xbb, 0xcf, 0xff, 0x88, 0x55, 0x75, 0x15, 0xca, 0xbe, 0x12, 0x44, 0x59, 0xa6, 0x04,
-	0x28, 0x11, 0x43, 0x25, 0x38, 0x94, 0x6b, 0xf3, 0x13, 0x69, 0xd1, 0xa8, 0x2e, 0xa1, 0x15, 0xd2,
-	0xd8, 0x05, 0x28, 0xb6, 0x3d, 0x57, 0xdc, 0x19, 0x9f, 0x90, 0xa3, 0x7d, 0xbb, 0x5a, 0x39, 0x3c,
-	0x98, 0x7d, 0x2c, 0x4f, 0x3b, 0x40, 0x2f, 0xab, 0x68, 0xee, 0x76, 0xb5, 0x82, 0x69, 0x65, 0x34,
-	0x0f, 0x13, 0x52, 0x01, 0x72, 0x87, 0x72, 0x50, 0x81, 0x2f, 0xae, 0x16, 0x25, 0x66, 0xc5, 0x26,
-	0x18, 0xa7, 0xf1, 0x51, 0x05, 0x26, 0xb7, 0xdb, 0xeb, 0xa4, 0x49, 0x62, 0xfe, 0xc1, 0x37, 0x08,
-	0x17, 0x42, 0x96, 0x93, 0xc7, 0xd6, 0x8d, 0x14, 0x1c, 0x77, 0xd4, 0xb0, 0xff, 0x9c, 0x1d, 0xf1,
-	0x62, 0xf4, 0x6a, 0x61, 0x40, 0x17, 0x16, 0xa5, 0xfe, 0xf5, 0x5c, 0xce, 0xfd, 0xac, 0x8a, 0x1b,
-	0x64, 0x7f, 0x2d, 0xa0, 0xcc, 0x76, 0xf6, 0xaa, 0x30, 0xd6, 0xfc, 0x40, 0xd7, 0x35, 0xff, 0x0b,
-	0x05, 0x38, 0xa3, 0x46, 0xc0, 0xe0, 0xeb, 0xfe, 0xa2, 0x8f, 0xc1, 0xb3, 0x30, 0xe2, 0x92, 0x0d,
-	0xa7, 0xdd, 0x8c, 0x95, 0x44, 0x7c, 0x90, 0x6b, 0x45, 0x2a, 0x49, 0x31, 0xd6, 0x71, 0x8e, 0x30,
-	0x6c, 0xff, 0x63, 0x84, 0xdd, 0xad, 0xb1, 0x43, 0xd7, 0xb8, 0xda, 0x35, 0x56, 0xee, 0xae, 0x79,
-	0x1c, 0x06, 0xbd, 0x1d, 0xca, 0x6b, 0x15, 0x4c, 0x16, 0xaa, 0x4a, 0x0b, 0x31, 0x87, 0xa1, 0x8f,
-	0xc1, 0x70, 0x23, 0xd8, 0xd9, 0x71, 0x7c, 0x97, 0x5d, 0x79, 0xe5, 0x85, 0x11, 0xca, 0x8e, 0x2d,
-	0xf2, 0x22, 0x2c, 0x61, 0xe8, 0x3c, 0x0c, 0x38, 0xe1, 0x26, 0x17, 0x4b, 0x94, 0x17, 0x4a, 0xb4,
-	0xa5, 0xf9, 0x70, 0x33, 0xc2, 0xac, 0x94, 0xbe, 0xaa, 0xf6, 0x82, 0x70, 0xdb, 0xf3, 0x37, 0x2b,
-	0x5e, 0x28, 0xb6, 0x84, 0xba, 0x0b, 0xef, 0x2a, 0x08, 0xd6, 0xb0, 0xd0, 0x32, 0x0c, 0xb6, 0x82,
-	0x30, 0x8e, 0xa6, 0x87, 0xd8, 0x70, 0x3f, 0x96, 0x73, 0x10, 0xf1, 0xaf, 0xad, 0x05, 0x61, 0x9c,
-	0x7c, 0x00, 0xfd, 0x17, 0x61, 0x5e, 0x1d, 0xdd, 0x84, 0x61, 0xe2, 0xef, 0x2e, 0x87, 0xc1, 0xce,
-	0xf4, 0xa9, 0x7c, 0x4a, 0x4b, 0x1c, 0x85, 0x2f, 0xb3, 0x84, 0xed, 0x14, 0xc5, 0x58, 0x92, 0x40,
-	0xdf, 0x04, 0x45, 0xe2, 0xef, 0x4e, 0x0f, 0x33, 0x4a, 0x33, 0x39, 0x94, 0xee, 0x38, 0x61, 0x72,
-	0xe6, 0x2f, 0xf9, 0xbb, 0x98, 0xd6, 0x41, 0x9f, 0x81, 0xb2, 0x3c, 0x30, 0x22, 0x21, 0x7f, 0xcb,
-	0x5c, 0xb0, 0xf2, 0x98, 0xc1, 0xe4, 0xdd, 0xb6, 0x17, 0x92, 0x1d, 0xe2, 0xc7, 0x51, 0x72, 0x42,
-	0x4a, 0x68, 0x84, 0x13, 0x6a, 0xe8, 0x33, 0x52, 0xe8, 0xbb, 0x12, 0xb4, 0xfd, 0x38, 0x9a, 0x2e,
-	0xb3, 0xee, 0x65, 0xaa, 0xe3, 0xee, 0x24, 0x78, 0x69, 0xa9, 0x30, 0xaf, 0x8c, 0x0d, 0x52, 0xe8,
-	0xb3, 0x30, 0xc6, 0xff, 0x73, 0xa5, 0x56, 0x34, 0x7d, 0x86, 0xd1, 0xbe, 0x94, 0x4f, 0x9b, 0x23,
-	0x2e, 0x9c, 0x11, 0xc4, 0xc7, 0xf4, 0xd2, 0x08, 0x9b, 0xd4, 0x10, 0x86, 0xb1, 0xa6, 0xb7, 0x4b,
-	0x7c, 0x12, 0x45, 0xb5, 0x30, 0x58, 0x27, 0xd3, 0xc0, 0x06, 0xe6, 0x5c, 0xb6, 0x12, 0x2c, 0x58,
-	0x27, 0x0b, 0x53, 0x94, 0xe6, 0x4d, 0xbd, 0x0e, 0x36, 0x49, 0xa0, 0xdb, 0x30, 0x4e, 0x1f, 0x61,
-	0x5e, 0x42, 0x74, 0xa4, 0x17, 0x51, 0xf6, 0x54, 0xc2, 0x46, 0x25, 0x9c, 0x22, 0x82, 0x6e, 0xc1,
-	0x68, 0x14, 0x3b, 0x61, 0xdc, 0x6e, 0x71, 0xa2, 0x67, 0x7b, 0x11, 0x65, 0x3a, 0xd4, 0xba, 0x56,
-	0x05, 0x1b, 0x04, 0xd0, 0x1b, 0x50, 0x6e, 0x7a, 0x1b, 0xa4, 0xb1, 0xdf, 0x68, 0x92, 0xe9, 0x51,
-	0x46, 0x2d, 0xf3, 0x50, 0xb9, 0x29, 0x91, 0xf8, 0xab, 0x50, 0xfd, 0xc5, 0x49, 0x75, 0x74, 0x07,
-	0xce, 0xc6, 0x24, 0xdc, 0xf1, 0x7c, 0x87, 0x1e, 0x06, 0xe2, 0xb5, 0xc4, 0x74, 0x93, 0x63, 0x6c,
-	0xb7, 0x5d, 0x14, 0xb3, 0x71, 0x76, 0x2d, 0x13, 0x0b, 0xe7, 0xd4, 0x46, 0xf7, 0x60, 0x3a, 0x03,
-	0x12, 0x34, 0xbd, 0xc6, 0xfe, 0xf4, 0x69, 0x46, 0xf9, 0x35, 0x41, 0x79, 0x7a, 0x2d, 0x07, 0xef,
-	0xb0, 0x0b, 0x0c, 0xe7, 0x52, 0x47, 0xb7, 0x60, 0x82, 0x9d, 0x40, 0xb5, 0x76, 0xb3, 0x29, 0x1a,
-	0x1c, 0x67, 0x0d, 0x7e, 0x4c, 0xde, 0xc7, 0x55, 0x13, 0x7c, 0x78, 0x30, 0x0b, 0xc9, 0x3f, 0x9c,
-	0xae, 0x8d, 0xd6, 0x99, 0x1a, 0xac, 0x1d, 0x7a, 0xf1, 0x3e, 0x3d, 0x37, 0xc8, 0xbd, 0x78, 0x7a,
-	0xa2, 0xab, 0x08, 0x42, 0x47, 0x55, 0xba, 0x32, 0xbd, 0x10, 0xa7, 0x09, 0xd2, 0x23, 0x35, 0x8a,
-	0x5d, 0xcf, 0x9f, 0x9e, 0x64, 0x27, 0xb5, 0x3a, 0x91, 0xea, 0xb4, 0x10, 0x73, 0x18, 0x53, 0x81,
-	0xd1, 0x1f, 0xb7, 0xe8, 0xcd, 0x35, 0xc5, 0x10, 0x13, 0x15, 0x98, 0x04, 0xe0, 0x04, 0x87, 0x32,
-	0x93, 0x71, 0xbc, 0x3f, 0x8d, 0x18, 0xaa, 0x3a, 0x58, 0xd6, 0xd6, 0x3e, 0x83, 0x69, 0xb9, 0xbd,
-	0x0e, 0xe3, 0xea, 0x20, 0x64, 0x63, 0x82, 0x66, 0x61, 0x90, 0xb1, 0x4f, 0x42, 0x60, 0x56, 0xa6,
-	0x5d, 0x60, 0xac, 0x15, 0xe6, 0xe5, 0xac, 0x0b, 0xde, 0x7b, 0x64, 0x61, 0x3f, 0x26, 0xfc, 0x99,
-	0x5e, 0xd4, 0xba, 0x20, 0x01, 0x38, 0xc1, 0xb1, 0xff, 0x37, 0x67, 0x43, 0x93, 0xd3, 0xb6, 0x8f,
-	0xfb, 0xe5, 0x69, 0x28, 0x6d, 0x05, 0x51, 0x4c, 0xb1, 0x59, 0x1b, 0x83, 0x09, 0xe3, 0x79, 0x5d,
-	0x94, 0x63, 0x85, 0x81, 0x5e, 0x85, 0xb1, 0x86, 0xde, 0x80, 0xb8, 0x1c, 0xd5, 0x31, 0x62, 0xb4,
-	0x8e, 0x4d, 0x5c, 0xf4, 0x32, 0x94, 0x98, 0x59, 0x47, 0x23, 0x68, 0x0a, 0xae, 0x4d, 0xde, 0xf0,
-	0xa5, 0x9a, 0x28, 0x3f, 0xd4, 0x7e, 0x63, 0x85, 0x8d, 0x2e, 0xc3, 0x10, 0xed, 0x42, 0xb5, 0x26,
-	0xae, 0x25, 0x25, 0xfb, 0xb9, 0xce, 0x4a, 0xb1, 0x80, 0xda, 0x7f, 0xa9, 0xa0, 0x8d, 0x32, 0x7d,
-	0xe2, 0x12, 0x54, 0x83, 0xe1, 0x3d, 0xc7, 0x8b, 0x3d, 0x7f, 0x53, 0xf0, 0x1f, 0x4f, 0x76, 0xbd,
-	0xa3, 0x58, 0xa5, 0xbb, 0xbc, 0x02, 0xbf, 0x45, 0xc5, 0x1f, 0x2c, 0xc9, 0x50, 0x8a, 0x61, 0xdb,
-	0xf7, 0x29, 0xc5, 0x42, 0xbf, 0x14, 0x31, 0xaf, 0xc0, 0x29, 0x8a, 0x3f, 0x58, 0x92, 0x41, 0x6f,
-	0x03, 0xc8, 0x1d, 0x46, 0x5c, 0x61, 0x4e, 0xf1, 0x74, 0x6f, 0xa2, 0x6b, 0xaa, 0xce, 0xc2, 0x38,
-	0xbd, 0xa3, 0x93, 0xff, 0x58, 0xa3, 0x67, 0xc7, 0x8c, 0x4f, 0xeb, 0xec, 0x0c, 0xfa, 0x56, 0xba,
-	0xc4, 0x9d, 0x30, 0x26, 0xee, 0x7c, 0x2c, 0x06, 0xe7, 0xe3, 0xfd, 0x3d, 0x52, 0xd6, 0xbc, 0x1d,
-	0xa2, 0x6f, 0x07, 0x41, 0x04, 0x27, 0xf4, 0xec, 0x5f, 0x2a, 0xc2, 0x74, 0x5e, 0x77, 0xe9, 0xa2,
-	0x23, 0xf7, 0xbc, 0x78, 0x91, 0xb2, 0x57, 0x96, 0xb9, 0xe8, 0x96, 0x44, 0x39, 0x56, 0x18, 0x74,
-	0xf6, 0x23, 0x6f, 0x53, 0xbe, 0x31, 0x07, 0x93, 0xd9, 0xaf, 0xb3, 0x52, 0x2c, 0xa0, 0x14, 0x2f,
-	0x24, 0x4e, 0x24, 0xec, 0x75, 0xb4, 0x55, 0x82, 0x59, 0x29, 0x16, 0x50, 0x5d, 0x80, 0x35, 0xd0,
-	0x43, 0x80, 0x65, 0x0c, 0xd1, 0xe0, 0xf1, 0x0e, 0x11, 0xfa, 0x1c, 0xc0, 0x86, 0xe7, 0x7b, 0xd1,
-	0x16, 0xa3, 0x3e, 0x74, 0x64, 0xea, 0x8a, 0x39, 0x5b, 0x56, 0x54, 0xb0, 0x46, 0x11, 0xbd, 0x08,
-	0x23, 0x6a, 0x03, 0x56, 0x2b, 0x4c, 0x79, 0xa9, 0x19, 0x83, 0x24, 0xa7, 0x51, 0x05, 0xeb, 0x78,
-	0xf6, 0x3b, 0xe9, 0xf5, 0x22, 0x76, 0x80, 0x36, 0xbe, 0x56, 0xbf, 0xe3, 0x5b, 0xe8, 0x3e, 0xbe,
-	0xf6, 0xd7, 0x8a, 0x30, 0x61, 0x34, 0xd6, 0x8e, 0xfa, 0x38, 0xb3, 0xae, 0xd1, 0x03, 0xdc, 0x89,
-	0x89, 0xd8, 0x7f, 0x76, 0xef, 0xad, 0xa2, 0x1f, 0xf2, 0x74, 0x07, 0xf0, 0xfa, 0xe8, 0x73, 0x50,
-	0x6e, 0x3a, 0x11, 0x13, 0x86, 0x11, 0xb1, 0xef, 0xfa, 0x21, 0x96, 0x3c, 0x4c, 0x9c, 0x28, 0xd6,
-	0x6e, 0x4d, 0x4e, 0x3b, 0x21, 0x49, 0x6f, 0x1a, 0xca, 0x9f, 0x48, 0x83, 0x30, 0xd5, 0x09, 0xca,
-	0xc4, 0xec, 0x63, 0x0e, 0x43, 0x2f, 0xc3, 0x68, 0x48, 0xd8, 0xaa, 0x58, 0xa4, 0xdc, 0x1c, 0x5b,
-	0x66, 0x83, 0x09, 0xdb, 0x87, 0x35, 0x18, 0x36, 0x30, 0x93, 0xb7, 0xc1, 0x50, 0x97, 0xb7, 0xc1,
-	0x93, 0x30, 0xcc, 0x7e, 0xa8, 0x15, 0xa0, 0x66, 0xa3, 0xca, 0x8b, 0xb1, 0x84, 0xa7, 0x17, 0x4c,
-	0xa9, 0xbf, 0x05, 0x43, 0x5f, 0x1f, 0x62, 0x51, 0x33, 0xc5, 0x71, 0x89, 0x9f, 0x72, 0x62, 0xc9,
-	0x63, 0x09, 0xb3, 0x3f, 0x0e, 0xe3, 0x15, 0x87, 0xec, 0x04, 0xfe, 0x92, 0xef, 0xb6, 0x02, 0xcf,
-	0x8f, 0xd1, 0x34, 0x0c, 0xb0, 0x4b, 0x84, 0x1f, 0x01, 0x03, 0xb4, 0x21, 0x3c, 0x40, 0x1f, 0x04,
-	0xf6, 0x26, 0x9c, 0xa9, 0x04, 0x7b, 0xfe, 0x9e, 0x13, 0xba, 0xf3, 0xb5, 0xaa, 0xf6, 0xbe, 0x5e,
-	0x95, 0xef, 0x3b, 0x6e, 0x87, 0x95, 0x79, 0xf4, 0x6a, 0x35, 0x39, 0x5b, 0xbb, 0xec, 0x35, 0x49,
-	0x8e, 0x14, 0xe4, 0xaf, 0x16, 0x8c, 0x96, 0x12, 0x7c, 0xa5, 0xa8, 0xb2, 0x72, 0x15, 0x55, 0x6f,
-	0x42, 0x69, 0xc3, 0x23, 0x4d, 0x17, 0x93, 0x0d, 0xb1, 0x12, 0x9f, 0xc8, 0x37, 0x2d, 0x59, 0xa6,
-	0x98, 0x52, 0xea, 0xc5, 0x5f, 0x87, 0xcb, 0xa2, 0x32, 0x56, 0x64, 0xd0, 0x36, 0x4c, 0xca, 0x07,
-	0x83, 0x84, 0x8a, 0x75, 0xf9, 0x64, 0xb7, 0x57, 0x88, 0x49, 0xfc, 0xf4, 0xfd, 0x83, 0xd9, 0x49,
-	0x9c, 0x22, 0x83, 0x3b, 0x08, 0xd3, 0xe7, 0xe0, 0x0e, 0x3d, 0x81, 0x07, 0xd8, 0xf0, 0xb3, 0xe7,
-	0x20, 0x7b, 0xd9, 0xb2, 0x52, 0xfb, 0x47, 0x2d, 0x78, 0xa4, 0x63, 0x64, 0xc4, 0x0b, 0xff, 0x98,
-	0x67, 0x21, 0xfd, 0xe2, 0x2e, 0xf4, 0x7e, 0x71, 0xdb, 0x3f, 0x6b, 0xc1, 0xe9, 0xa5, 0x9d, 0x56,
-	0xbc, 0x5f, 0xf1, 0x4c, 0xad, 0xd2, 0x4b, 0x30, 0xb4, 0x43, 0x5c, 0xaf, 0xbd, 0x23, 0x66, 0x6e,
-	0x56, 0x9e, 0x52, 0x2b, 0xac, 0xf4, 0xf0, 0x60, 0x76, 0xac, 0x1e, 0x07, 0xa1, 0xb3, 0x49, 0x78,
-	0x01, 0x16, 0xe8, 0xec, 0xac, 0xf7, 0xde, 0x23, 0x37, 0xbd, 0x1d, 0x4f, 0x9a, 0x0a, 0x75, 0x95,
-	0xd9, 0xcd, 0xc9, 0x01, 0x9d, 0x7b, 0xb3, 0xed, 0xf8, 0xb1, 0x17, 0xef, 0x0b, 0x85, 0x90, 0x24,
-	0x82, 0x13, 0x7a, 0xf6, 0x57, 0x2d, 0x98, 0x90, 0xeb, 0x7e, 0xde, 0x75, 0x43, 0x12, 0x45, 0x68,
-	0x06, 0x0a, 0x5e, 0x4b, 0xf4, 0x12, 0x44, 0x2f, 0x0b, 0xd5, 0x1a, 0x2e, 0x78, 0x2d, 0xc9, 0x96,
-	0xb1, 0x83, 0xb0, 0x68, 0xea, 0xc6, 0xae, 0x8b, 0x72, 0xac, 0x30, 0xd0, 0x15, 0x28, 0xf9, 0x81,
-	0xcb, 0xcd, 0xb5, 0xf8, 0x95, 0xc6, 0x16, 0xd8, 0xaa, 0x28, 0xc3, 0x0a, 0x8a, 0x6a, 0x50, 0xe6,
-	0x96, 0x4c, 0xc9, 0xa2, 0xed, 0xcb, 0x1e, 0x8a, 0x7d, 0xd9, 0x9a, 0xac, 0x89, 0x13, 0x22, 0xf6,
-	0x0f, 0x58, 0x30, 0x2a, 0xbf, 0xac, 0x4f, 0x9e, 0x93, 0x6e, 0xad, 0x84, 0xdf, 0x4c, 0xb6, 0x16,
-	0xe5, 0x19, 0x19, 0xc4, 0x60, 0x15, 0x8b, 0x47, 0x61, 0x15, 0xed, 0x1f, 0x29, 0xc0, 0xb8, 0xec,
-	0x4e, 0xbd, 0xbd, 0x1e, 0x91, 0x18, 0xad, 0x41, 0xd9, 0xe1, 0x43, 0x4e, 0xe4, 0x8a, 0x7d, 0x3c,
-	0x5b, 0x28, 0x60, 0xcc, 0x4f, 0x72, 0x7b, 0xcf, 0xcb, 0xda, 0x38, 0x21, 0x84, 0x9a, 0x30, 0xe5,
-	0x07, 0x31, 0x3b, 0xc9, 0x15, 0xbc, 0x9b, 0xea, 0x25, 0x4d, 0xfd, 0x9c, 0xa0, 0x3e, 0xb5, 0x9a,
-	0xa6, 0x82, 0x3b, 0x09, 0xa3, 0x25, 0x29, 0x68, 0x29, 0xe6, 0xbf, 0xec, 0xf5, 0x59, 0xc8, 0x96,
-	0xb3, 0xd8, 0xbf, 0x62, 0x41, 0x59, 0xa2, 0x9d, 0x84, 0x96, 0x6d, 0x05, 0x86, 0x23, 0x36, 0x09,
-	0x72, 0x68, 0xec, 0x6e, 0x1d, 0xe7, 0xf3, 0x95, 0x5c, 0x50, 0xfc, 0x7f, 0x84, 0x25, 0x0d, 0x26,
-	0x67, 0x57, 0xdd, 0xff, 0x80, 0xc8, 0xd9, 0x55, 0x7f, 0x72, 0x6e, 0x98, 0x3f, 0x60, 0x7d, 0xd6,
-	0x04, 0x57, 0x94, 0x8f, 0x6a, 0x85, 0x64, 0xc3, 0xbb, 0x97, 0xe6, 0xa3, 0x6a, 0xac, 0x14, 0x0b,
-	0x28, 0x7a, 0x1b, 0x46, 0x1b, 0x52, 0xc0, 0x9a, 0x6c, 0xd7, 0xcb, 0x5d, 0x85, 0xfd, 0x4a, 0x2f,
-	0xc4, 0x05, 0x1b, 0x8b, 0x5a, 0x7d, 0x6c, 0x50, 0x33, 0xd5, 0xfc, 0xc5, 0x5e, 0x6a, 0xfe, 0x84,
-	0x6e, 0xbe, 0xd2, 0xfb, 0xc7, 0x2c, 0x18, 0xe2, 0x82, 0xb5, 0xfe, 0xe4, 0x9a, 0x9a, 0x9a, 0x2c,
-	0x19, 0xbb, 0x3b, 0xb4, 0x50, 0xa8, 0xbd, 0xd0, 0x0a, 0x94, 0xd9, 0x0f, 0x26, 0x18, 0x2c, 0xe6,
-	0x5b, 0xc5, 0xf3, 0x56, 0xf5, 0x0e, 0xde, 0x91, 0xd5, 0x70, 0x42, 0xc1, 0xfe, 0xa1, 0x22, 0x3d,
-	0xaa, 0x12, 0x54, 0xe3, 0x06, 0xb7, 0x1e, 0xde, 0x0d, 0x5e, 0x78, 0x58, 0x37, 0xf8, 0x26, 0x4c,
-	0x34, 0x34, 0xa5, 0x5a, 0x32, 0x93, 0x57, 0xba, 0x2e, 0x12, 0x4d, 0xff, 0xc6, 0x45, 0x26, 0x8b,
-	0x26, 0x11, 0x9c, 0xa6, 0x8a, 0xbe, 0x15, 0x46, 0xf9, 0x3c, 0x8b, 0x56, 0xb8, 0xa5, 0xc4, 0xc7,
-	0xf2, 0xd7, 0x8b, 0xde, 0x04, 0x17, 0xb1, 0x69, 0xd5, 0xb1, 0x41, 0xcc, 0xfe, 0x63, 0x0b, 0xd0,
-	0x52, 0x6b, 0x8b, 0xec, 0x90, 0xd0, 0x69, 0x26, 0xb2, 0xf1, 0xff, 0xcf, 0x82, 0x69, 0xd2, 0x51,
-	0xbc, 0x18, 0xec, 0xec, 0x88, 0x17, 0x48, 0xce, 0x23, 0x79, 0x29, 0xa7, 0x8e, 0x72, 0x1b, 0x98,
-	0xce, 0xc3, 0xc0, 0xb9, 0xed, 0xa1, 0x15, 0x38, 0xc5, 0xaf, 0x3c, 0x05, 0xd0, 0x6c, 0xa3, 0x1f,
-	0x15, 0x84, 0x4f, 0xad, 0x75, 0xa2, 0xe0, 0xac, 0x7a, 0xf6, 0x77, 0x8d, 0x42, 0x6e, 0x2f, 0x3e,
-	0x54, 0x0a, 0x7c, 0xa8, 0x14, 0xf8, 0x50, 0x29, 0xf0, 0xa1, 0x52, 0xe0, 0x43, 0xa5, 0xc0, 0x37,
-	0xbc, 0x52, 0xe0, 0x0f, 0x2d, 0x38, 0xd5, 0x79, 0x0d, 0x9c, 0x04, 0x63, 0xde, 0x86, 0x53, 0x9d,
-	0x77, 0x5d, 0x57, 0x3b, 0xb8, 0xce, 0x7e, 0x26, 0xf7, 0x5e, 0xc6, 0x37, 0xe0, 0x2c, 0xfa, 0xf6,
-	0x2f, 0x95, 0x60, 0x70, 0x69, 0x97, 0xf8, 0xf1, 0x09, 0x7c, 0x62, 0x03, 0xc6, 0x3d, 0x7f, 0x37,
-	0x68, 0xee, 0x12, 0x97, 0xc3, 0x8f, 0xf2, 0xde, 0x3d, 0x2b, 0x48, 0x8f, 0x57, 0x0d, 0x12, 0x38,
-	0x45, 0xf2, 0x61, 0xc8, 0x9c, 0xaf, 0xc1, 0x10, 0xbf, 0x1d, 0x84, 0xc0, 0x39, 0xf3, 0x32, 0x60,
-	0x83, 0x28, 0xee, 0xbc, 0x44, 0x1e, 0xce, 0x6f, 0x1f, 0x51, 0x1d, 0xbd, 0x03, 0xe3, 0x1b, 0x5e,
-	0x18, 0xc5, 0x6b, 0xde, 0x0e, 0x89, 0x62, 0x67, 0xa7, 0xf5, 0x00, 0x32, 0x66, 0x35, 0x0e, 0xcb,
-	0x06, 0x25, 0x9c, 0xa2, 0x8c, 0x36, 0x61, 0xac, 0xe9, 0xe8, 0x4d, 0x0d, 0x1f, 0xb9, 0x29, 0x75,
-	0xed, 0xdc, 0xd4, 0x09, 0x61, 0x93, 0x2e, 0xdd, 0xa7, 0x0d, 0x26, 0x26, 0x2d, 0x31, 0xe1, 0x81,
-	0xda, 0xa7, 0x5c, 0x3e, 0xca, 0x61, 0x94, 0x83, 0x62, 0x96, 0xb1, 0x65, 0x93, 0x83, 0xd2, 0xec,
-	0x5f, 0x3f, 0x0f, 0x65, 0x42, 0x87, 0x90, 0x12, 0x16, 0x37, 0xd7, 0xd5, 0xfe, 0xfa, 0xba, 0xe2,
-	0x35, 0xc2, 0xc0, 0x94, 0xee, 0x2f, 0x49, 0x4a, 0x38, 0x21, 0x8a, 0x16, 0x61, 0x28, 0x22, 0xa1,
-	0x47, 0x22, 0x71, 0x87, 0x75, 0x99, 0x46, 0x86, 0xc6, 0x9d, 0x4a, 0xf8, 0x6f, 0x2c, 0xaa, 0xd2,
-	0xe5, 0xe5, 0x30, 0xc1, 0x27, 0xbb, 0x65, 0xb4, 0xe5, 0x35, 0xcf, 0x4a, 0xb1, 0x80, 0xa2, 0x37,
-	0x60, 0x38, 0x24, 0x4d, 0xa6, 0x3e, 0x1a, 0xeb, 0x7f, 0x91, 0x73, 0x6d, 0x14, 0xaf, 0x87, 0x25,
-	0x01, 0x74, 0x03, 0x50, 0x48, 0x28, 0x07, 0xe6, 0xf9, 0x9b, 0xca, 0x5e, 0x54, 0x9c, 0xe0, 0x6a,
-	0xc7, 0xe3, 0x04, 0x43, 0xfa, 0xf7, 0xe0, 0x8c, 0x6a, 0xe8, 0x1a, 0x4c, 0xa9, 0xd2, 0xaa, 0x1f,
-	0xc5, 0x0e, 0x3d, 0x39, 0x27, 0x18, 0x2d, 0x25, 0x00, 0xc1, 0x69, 0x04, 0xdc, 0x59, 0xc7, 0xfe,
-	0x69, 0x0b, 0xf8, 0x38, 0x9f, 0xc0, 0xb3, 0xff, 0x75, 0xf3, 0xd9, 0x7f, 0x2e, 0x77, 0xe6, 0x72,
-	0x9e, 0xfc, 0xf7, 0x2d, 0x18, 0xd1, 0x66, 0x36, 0x59, 0xb3, 0x56, 0x97, 0x35, 0xdb, 0x86, 0x49,
-	0xba, 0xd2, 0x6f, 0xad, 0x47, 0x24, 0xdc, 0x25, 0x2e, 0x5b, 0x98, 0x85, 0x07, 0x5b, 0x98, 0xca,
-	0x90, 0xed, 0x66, 0x8a, 0x20, 0xee, 0x68, 0x02, 0xbd, 0x24, 0x75, 0x29, 0x45, 0xc3, 0x0e, 0x9c,
-	0xeb, 0x49, 0x0e, 0x0f, 0x66, 0x27, 0xb5, 0x0f, 0xd1, 0x75, 0x27, 0xf6, 0xe7, 0xe5, 0x37, 0x2a,
-	0x83, 0xc1, 0x86, 0x5a, 0x2c, 0x29, 0x83, 0x41, 0xb5, 0x1c, 0x70, 0x82, 0x43, 0xf7, 0xe8, 0x56,
-	0x10, 0xc5, 0x69, 0x83, 0xc1, 0xeb, 0x41, 0x14, 0x63, 0x06, 0xb1, 0x9f, 0x07, 0x58, 0xba, 0x47,
-	0x1a, 0x7c, 0xa9, 0xeb, 0xcf, 0x19, 0x2b, 0xff, 0x39, 0x63, 0xff, 0x3b, 0x0b, 0xc6, 0x97, 0x17,
-	0x0d, 0x89, 0xf0, 0x1c, 0x00, 0x7f, 0x83, 0xdd, 0xbd, 0xbb, 0x2a, 0xb5, 0xed, 0x5c, 0x61, 0xaa,
-	0x4a, 0xb1, 0x86, 0x81, 0xce, 0x41, 0xb1, 0xd9, 0xf6, 0x85, 0x74, 0x72, 0x98, 0x5e, 0xd8, 0x37,
-	0xdb, 0x3e, 0xa6, 0x65, 0x9a, 0x13, 0x42, 0xb1, 0x6f, 0x27, 0x84, 0x9e, 0xc1, 0x00, 0xd0, 0x2c,
-	0x0c, 0xee, 0xed, 0x79, 0x2e, 0x77, 0xb9, 0x14, 0x96, 0x00, 0x77, 0xef, 0x56, 0x2b, 0x11, 0xe6,
-	0xe5, 0xf6, 0x97, 0x8a, 0x30, 0xb3, 0xdc, 0x24, 0xf7, 0xde, 0xa7, 0xdb, 0x69, 0xbf, 0x2e, 0x14,
-	0x47, 0x13, 0x0d, 0x1d, 0xd5, 0x4d, 0xa6, 0xf7, 0x78, 0x6c, 0xc0, 0x30, 0xb7, 0x97, 0x93, 0x4e,
-	0xa8, 0xaf, 0x66, 0xb5, 0x9e, 0x3f, 0x20, 0x73, 0xdc, 0xee, 0x4e, 0xf8, 0xd0, 0xa9, 0x9b, 0x56,
-	0x94, 0x62, 0x49, 0x7c, 0xe6, 0x15, 0x18, 0xd5, 0x31, 0x8f, 0xe4, 0xb0, 0xf6, 0xff, 0x16, 0x61,
-	0x92, 0xf6, 0xe0, 0xa1, 0x4e, 0xc4, 0xed, 0xce, 0x89, 0x38, 0x6e, 0xa7, 0xa5, 0xde, 0xb3, 0xf1,
-	0x76, 0x7a, 0x36, 0x9e, 0xcd, 0x9b, 0x8d, 0x93, 0x9e, 0x83, 0xef, 0xb4, 0xe0, 0xd4, 0x72, 0x33,
-	0x68, 0x6c, 0xa7, 0x1c, 0x8b, 0x5e, 0x84, 0x11, 0x7a, 0x8e, 0x47, 0x86, 0xcf, 0xbb, 0x11, 0x05,
-	0x41, 0x80, 0xb0, 0x8e, 0xa7, 0x55, 0xbb, 0x7d, 0xbb, 0x5a, 0xc9, 0x0a, 0x9e, 0x20, 0x40, 0x58,
-	0xc7, 0xb3, 0x7f, 0xd3, 0x82, 0x0b, 0xd7, 0x16, 0x97, 0x92, 0xa5, 0xd8, 0x11, 0xbf, 0xe1, 0x32,
-	0x0c, 0xb5, 0x5c, 0xad, 0x2b, 0x89, 0xc0, 0xb7, 0xc2, 0x7a, 0x21, 0xa0, 0x1f, 0x94, 0xd8, 0x24,
-	0x3f, 0x65, 0xc1, 0xa9, 0x6b, 0x5e, 0x4c, 0xaf, 0xe5, 0x74, 0x24, 0x01, 0x7a, 0x2f, 0x47, 0x5e,
-	0x1c, 0x84, 0xfb, 0xe9, 0x48, 0x02, 0x58, 0x41, 0xb0, 0x86, 0xc5, 0x5b, 0xde, 0xf5, 0x98, 0xa5,
-	0x76, 0xc1, 0xd4, 0x63, 0x61, 0x51, 0x8e, 0x15, 0x06, 0xfd, 0x30, 0xd7, 0x0b, 0x99, 0xd4, 0x70,
-	0x5f, 0x9c, 0xb0, 0xea, 0xc3, 0x2a, 0x12, 0x80, 0x13, 0x1c, 0xfa, 0x80, 0x9a, 0xbd, 0xd6, 0x6c,
-	0x47, 0x31, 0x09, 0x37, 0xa2, 0x9c, 0xd3, 0xf1, 0x79, 0x28, 0x13, 0x29, 0xa3, 0x17, 0xbd, 0x56,
-	0xac, 0xa6, 0x12, 0xde, 0xf3, 0x80, 0x06, 0x0a, 0xaf, 0x0f, 0x37, 0xc5, 0xa3, 0xf9, 0x99, 0x2d,
-	0x03, 0x22, 0x7a, 0x5b, 0x7a, 0x84, 0x07, 0xe6, 0x2a, 0xbe, 0xd4, 0x01, 0xc5, 0x19, 0x35, 0xec,
-	0x1f, 0xb5, 0xe0, 0x8c, 0xfa, 0xe0, 0x0f, 0xdc, 0x67, 0xda, 0x3f, 0x5f, 0x80, 0xb1, 0xeb, 0x6b,
-	0x6b, 0xb5, 0x6b, 0x24, 0x16, 0xd7, 0x76, 0x6f, 0x35, 0x3a, 0xd6, 0xb4, 0x81, 0xdd, 0x5e, 0x81,
-	0xed, 0xd8, 0x6b, 0xce, 0xf1, 0x40, 0x41, 0x73, 0x55, 0x3f, 0xbe, 0x15, 0xd6, 0xe3, 0xd0, 0xf3,
-	0x37, 0x33, 0xf5, 0x87, 0x92, 0xb9, 0x28, 0xe6, 0x31, 0x17, 0xe8, 0x79, 0x18, 0x62, 0x91, 0x8a,
-	0xe4, 0x24, 0x3c, 0xaa, 0x1e, 0x51, 0xac, 0xf4, 0xf0, 0x60, 0xb6, 0x7c, 0x1b, 0x57, 0xf9, 0x1f,
-	0x2c, 0x50, 0xd1, 0x6d, 0x18, 0xd9, 0x8a, 0xe3, 0xd6, 0x75, 0xe2, 0xb8, 0xf4, 0xb5, 0xcc, 0x8f,
-	0xc3, 0x8b, 0x59, 0xc7, 0x21, 0x1d, 0x04, 0x8e, 0x96, 0x9c, 0x20, 0x49, 0x59, 0x84, 0x75, 0x3a,
-	0x76, 0x1d, 0x20, 0x81, 0x1d, 0x93, 0xee, 0xc4, 0xfe, 0x7d, 0x0b, 0x86, 0x79, 0xd0, 0x88, 0x10,
-	0xbd, 0x06, 0x03, 0xe4, 0x1e, 0x69, 0x08, 0x56, 0x39, 0xb3, 0xc3, 0x09, 0xa7, 0xc5, 0x65, 0xc0,
-	0xf4, 0x3f, 0x66, 0xb5, 0xd0, 0x75, 0x18, 0xa6, 0xbd, 0xbd, 0xa6, 0x22, 0x68, 0x3c, 0x96, 0xf7,
-	0xc5, 0x6a, 0xda, 0x39, 0x73, 0x26, 0x8a, 0xb0, 0xac, 0xce, 0xb4, 0xcf, 0x8d, 0x56, 0x9d, 0x9e,
-	0xd8, 0x71, 0x37, 0xc6, 0x62, 0x6d, 0xb1, 0xc6, 0x91, 0x04, 0x35, 0xae, 0x7d, 0x96, 0x85, 0x38,
-	0x21, 0x62, 0xaf, 0x41, 0x99, 0x4e, 0xea, 0x7c, 0xd3, 0x73, 0xba, 0x2b, 0xd4, 0x9f, 0x82, 0xb2,
-	0x54, 0x97, 0x47, 0xc2, 0x59, 0x9c, 0x51, 0x95, 0xda, 0xf4, 0x08, 0x27, 0x70, 0x7b, 0x03, 0x4e,
-	0x33, 0xe3, 0x47, 0x27, 0xde, 0x32, 0xf6, 0x58, 0xef, 0xc5, 0xfc, 0xb4, 0x78, 0x79, 0xf2, 0x99,
-	0x99, 0xd6, 0xfc, 0x31, 0x47, 0x25, 0xc5, 0xe4, 0x15, 0x6a, 0x7f, 0x6d, 0x00, 0x1e, 0xad, 0xd6,
-	0xf3, 0xe3, 0x89, 0xbc, 0x0c, 0xa3, 0x9c, 0x2f, 0xa5, 0x4b, 0xdb, 0x69, 0x8a, 0x76, 0x95, 0xf0,
-	0x77, 0x4d, 0x83, 0x61, 0x03, 0x13, 0x5d, 0x80, 0xa2, 0xf7, 0xae, 0x9f, 0x76, 0x6d, 0xaa, 0xbe,
-	0xb9, 0x8a, 0x69, 0x39, 0x05, 0x53, 0x16, 0x97, 0xdf, 0x1d, 0x0a, 0xac, 0xd8, 0xdc, 0xd7, 0x61,
-	0xdc, 0x8b, 0x1a, 0x91, 0x57, 0xf5, 0xe9, 0x39, 0xa3, 0x9d, 0x54, 0x4a, 0x2a, 0x42, 0x3b, 0xad,
-	0xa0, 0x38, 0x85, 0xad, 0x5d, 0x64, 0x83, 0x7d, 0xb3, 0xc9, 0x3d, 0xbd, 0xa7, 0xe9, 0x0b, 0xa0,
-	0xc5, 0xbe, 0x2e, 0x62, 0x52, 0x7c, 0xf1, 0x02, 0xe0, 0x1f, 0x1c, 0x61, 0x09, 0xa3, 0x4f, 0xce,
-	0xc6, 0x96, 0xd3, 0x9a, 0x6f, 0xc7, 0x5b, 0x15, 0x2f, 0x6a, 0x04, 0xbb, 0x24, 0xdc, 0x67, 0xd2,
-	0x82, 0x52, 0xf2, 0xe4, 0x54, 0x80, 0xc5, 0xeb, 0xf3, 0x35, 0x8a, 0x89, 0x3b, 0xeb, 0xa0, 0x79,
-	0x98, 0x90, 0x85, 0x75, 0x12, 0xb1, 0x2b, 0x6c, 0x84, 0x91, 0x51, 0xce, 0x46, 0xa2, 0x58, 0x11,
-	0x49, 0xe3, 0x9b, 0x9c, 0x34, 0x1c, 0x07, 0x27, 0xfd, 0x12, 0x8c, 0x79, 0xbe, 0x17, 0x7b, 0x4e,
-	0x1c, 0x70, 0x15, 0x14, 0x17, 0x0c, 0x30, 0xd9, 0x7a, 0x55, 0x07, 0x60, 0x13, 0xcf, 0xfe, 0x2f,
-	0x03, 0x30, 0xc5, 0xa6, 0xed, 0xc3, 0x15, 0xf6, 0x8d, 0xb4, 0xc2, 0x6e, 0x77, 0xae, 0xb0, 0xe3,
-	0x78, 0x22, 0x3c, 0xf0, 0x32, 0x7b, 0x07, 0xca, 0xca, 0xbf, 0x4a, 0x3a, 0x58, 0x5a, 0x39, 0x0e,
-	0x96, 0xbd, 0xb9, 0x0f, 0x69, 0xa2, 0x56, 0xcc, 0x34, 0x51, 0xfb, 0xeb, 0x16, 0x24, 0x3a, 0x15,
-	0x74, 0x1d, 0xca, 0xad, 0x80, 0x59, 0x5e, 0x86, 0xd2, 0x9c, 0xf9, 0xd1, 0xcc, 0x8b, 0x8a, 0x5f,
-	0x8a, 0xfc, 0xe3, 0x6b, 0xb2, 0x06, 0x4e, 0x2a, 0xa3, 0x05, 0x18, 0x6e, 0x85, 0xa4, 0x1e, 0xb3,
-	0xb0, 0x22, 0x3d, 0xe9, 0xf0, 0x35, 0xc2, 0xf1, 0xb1, 0xac, 0x68, 0xff, 0x82, 0x05, 0xc0, 0xad,
-	0xc0, 0x1c, 0x7f, 0x93, 0x9c, 0x80, 0xb8, 0xbb, 0x02, 0x03, 0x51, 0x8b, 0x34, 0xba, 0xd9, 0xc4,
-	0x26, 0xfd, 0xa9, 0xb7, 0x48, 0x23, 0x19, 0x70, 0xfa, 0x0f, 0xb3, 0xda, 0xf6, 0x77, 0x03, 0x8c,
-	0x27, 0x68, 0xd5, 0x98, 0xec, 0xa0, 0x67, 0x8c, 0x30, 0x03, 0xe7, 0x52, 0x61, 0x06, 0xca, 0x0c,
-	0x5b, 0x93, 0xac, 0xbe, 0x03, 0xc5, 0x1d, 0xe7, 0x9e, 0x10, 0x9d, 0x3d, 0xd5, 0xbd, 0x1b, 0x94,
-	0xfe, 0xdc, 0x8a, 0x73, 0x8f, 0x3f, 0x12, 0x9f, 0x92, 0x0b, 0x64, 0xc5, 0xb9, 0x77, 0xc8, 0x2d,
-	0x5f, 0xd9, 0x21, 0x75, 0xd3, 0x8b, 0xe2, 0x2f, 0xfc, 0xe7, 0xe4, 0x3f, 0x5b, 0x76, 0xb4, 0x11,
-	0xd6, 0x96, 0xe7, 0x0b, 0x9b, 0xa8, 0xbe, 0xda, 0xf2, 0xfc, 0x74, 0x5b, 0x9e, 0xdf, 0x47, 0x5b,
-	0x9e, 0x8f, 0xde, 0x83, 0x61, 0x61, 0x7f, 0x28, 0xc2, 0xfa, 0x5c, 0xed, 0xa3, 0x3d, 0x61, 0xbe,
-	0xc8, 0xdb, 0xbc, 0x2a, 0x1f, 0xc1, 0xa2, 0xb4, 0x67, 0xbb, 0xb2, 0x41, 0xf4, 0x57, 0x2c, 0x18,
-	0x17, 0xbf, 0x31, 0x79, 0xb7, 0x4d, 0xa2, 0x58, 0xf0, 0x9e, 0x9f, 0xec, 0xbf, 0x0f, 0xa2, 0x22,
-	0xef, 0xca, 0x27, 0xe5, 0x31, 0x6b, 0x02, 0x7b, 0xf6, 0x28, 0xd5, 0x0b, 0xf4, 0xf7, 0x2c, 0x38,
-	0xbd, 0xe3, 0xdc, 0xe3, 0x2d, 0xf2, 0x32, 0xec, 0xc4, 0x5e, 0x20, 0x54, 0xff, 0xaf, 0xf5, 0x37,
-	0xfd, 0x1d, 0xd5, 0x79, 0x27, 0xa5, 0x7e, 0xf2, 0x74, 0x16, 0x4a, 0xcf, 0xae, 0x66, 0xf6, 0x6b,
-	0x66, 0x03, 0x4a, 0x72, 0xbd, 0x65, 0x88, 0x1a, 0x2a, 0x3a, 0x63, 0x7d, 0x64, 0xf3, 0x4f, 0xdd,
-	0xd7, 0x9f, 0xb6, 0x23, 0xd6, 0xda, 0x43, 0x6d, 0xe7, 0x1d, 0x18, 0xd5, 0xd7, 0xd8, 0x43, 0x6d,
-	0xeb, 0x5d, 0x38, 0x95, 0xb1, 0x96, 0x1e, 0x6a, 0x93, 0x7b, 0x70, 0x2e, 0x77, 0x7d, 0x3c, 0xcc,
-	0x86, 0xed, 0x9f, 0xb7, 0xf4, 0x73, 0xf0, 0x04, 0x74, 0x0e, 0x8b, 0xa6, 0xce, 0xe1, 0x62, 0xf7,
-	0x9d, 0x93, 0xa3, 0x78, 0x78, 0x5b, 0xef, 0x34, 0x3d, 0xd5, 0xd1, 0x1b, 0x30, 0xd4, 0xa4, 0x25,
-	0xd2, 0xf0, 0xd5, 0xee, 0xbd, 0x23, 0x13, 0x5e, 0x8a, 0x95, 0x47, 0x58, 0x50, 0xb0, 0x7f, 0xd9,
-	0x82, 0x81, 0x13, 0x18, 0x09, 0x6c, 0x8e, 0xc4, 0x33, 0xb9, 0xa4, 0x45, 0xc4, 0xe1, 0x39, 0xec,
-	0xec, 0x2d, 0xdd, 0x8b, 0x89, 0x1f, 0xb1, 0xa7, 0x62, 0xe6, 0xc0, 0xfc, 0x5f, 0x70, 0xea, 0x66,
-	0xe0, 0xb8, 0x0b, 0x4e, 0xd3, 0xf1, 0x1b, 0x24, 0xac, 0xfa, 0x9b, 0x47, 0xb2, 0xc0, 0x2e, 0xf4,
-	0xb2, 0xc0, 0xb6, 0xb7, 0x00, 0xe9, 0x0d, 0x08, 0x57, 0x16, 0x0c, 0xc3, 0x1e, 0x6f, 0x4a, 0x0c,
-	0xff, 0x13, 0xd9, 0xac, 0x59, 0x47, 0xcf, 0x34, 0x27, 0x0d, 0x5e, 0x80, 0x25, 0x21, 0xfb, 0x65,
-	0xc8, 0xf4, 0x87, 0xef, 0x2d, 0x36, 0xb0, 0x3f, 0x03, 0x53, 0xac, 0xe6, 0x11, 0x9f, 0xb4, 0x76,
-	0x4a, 0x2a, 0x99, 0x11, 0xfc, 0xce, 0xfe, 0xa2, 0x05, 0x13, 0xab, 0xa9, 0x98, 0x60, 0x97, 0x99,
-	0x02, 0x34, 0x43, 0x18, 0x5e, 0x67, 0xa5, 0x58, 0x40, 0x8f, 0x5d, 0x06, 0xf5, 0xe7, 0x16, 0x24,
-	0x21, 0x2a, 0x4e, 0x80, 0xf1, 0x5a, 0x34, 0x18, 0xaf, 0x4c, 0xd9, 0x88, 0xea, 0x4e, 0x1e, 0xdf,
-	0x85, 0x6e, 0xa8, 0x78, 0x4c, 0x5d, 0xc4, 0x22, 0x09, 0x19, 0x1e, 0xbd, 0x67, 0xdc, 0x0c, 0xda,
-	0x24, 0x23, 0x34, 0xd9, 0xff, 0xb1, 0x00, 0x48, 0xe1, 0xf6, 0x1d, 0x2f, 0xaa, 0xb3, 0xc6, 0xf1,
-	0xc4, 0x8b, 0xda, 0x05, 0xc4, 0x54, 0xf8, 0xa1, 0xe3, 0x47, 0x9c, 0xac, 0x27, 0xa4, 0x6e, 0x47,
-	0xb3, 0x0f, 0x98, 0x11, 0x4d, 0xa2, 0x9b, 0x1d, 0xd4, 0x70, 0x46, 0x0b, 0x9a, 0x69, 0xc6, 0x60,
-	0xbf, 0xa6, 0x19, 0x43, 0x3d, 0xdc, 0xd5, 0x7e, 0xce, 0x82, 0x31, 0x35, 0x4c, 0x1f, 0x10, 0xfb,
-	0x73, 0xd5, 0x9f, 0x9c, 0xa3, 0xaf, 0xa6, 0x75, 0x99, 0x5d, 0x09, 0xdf, 0xcc, 0xdc, 0x0e, 0x9d,
-	0xa6, 0xf7, 0x1e, 0x51, 0xd1, 0xfa, 0x66, 0x85, 0x1b, 0xa1, 0x28, 0x3d, 0x3c, 0x98, 0x1d, 0x53,
-	0xff, 0x78, 0x74, 0xe0, 0xa4, 0x8a, 0xfd, 0x13, 0x74, 0xb3, 0x9b, 0x4b, 0x11, 0xbd, 0x08, 0x83,
-	0xad, 0x2d, 0x27, 0x22, 0x29, 0xa7, 0x9b, 0xc1, 0x1a, 0x2d, 0x3c, 0x3c, 0x98, 0x1d, 0x57, 0x15,
-	0x58, 0x09, 0xe6, 0xd8, 0xfd, 0x47, 0xe1, 0xea, 0x5c, 0x9c, 0x3d, 0xa3, 0x70, 0xfd, 0xb1, 0x05,
-	0x03, 0xab, 0x81, 0x7b, 0x12, 0x47, 0xc0, 0xeb, 0xc6, 0x11, 0x70, 0x3e, 0x2f, 0x70, 0x7b, 0xee,
-	0xee, 0x5f, 0x4e, 0xed, 0xfe, 0x8b, 0xb9, 0x14, 0xba, 0x6f, 0xfc, 0x1d, 0x18, 0x61, 0xe1, 0xe0,
-	0x85, 0x83, 0xd1, 0xf3, 0xc6, 0x86, 0x9f, 0x4d, 0x6d, 0xf8, 0x09, 0x0d, 0x55, 0xdb, 0xe9, 0x4f,
-	0xc2, 0xb0, 0x70, 0x72, 0x49, 0x7b, 0x6f, 0x0a, 0x5c, 0x2c, 0xe1, 0xf6, 0x8f, 0x15, 0xc1, 0x08,
-	0x3f, 0x8f, 0x7e, 0xc5, 0x82, 0xb9, 0x90, 0x1b, 0xbf, 0xba, 0x95, 0x76, 0xe8, 0xf9, 0x9b, 0xf5,
-	0xc6, 0x16, 0x71, 0xdb, 0x4d, 0xcf, 0xdf, 0xac, 0x6e, 0xfa, 0x81, 0x2a, 0x5e, 0xba, 0x47, 0x1a,
-	0x6d, 0xa6, 0xbe, 0xea, 0x11, 0xeb, 0x5e, 0x19, 0x91, 0x3f, 0x77, 0xff, 0x60, 0x76, 0x0e, 0x1f,
-	0x89, 0x36, 0x3e, 0x62, 0x5f, 0xd0, 0x6f, 0x5a, 0x70, 0x95, 0x47, 0x65, 0xef, 0xbf, 0xff, 0x5d,
-	0xde, 0xb9, 0x35, 0x49, 0x2a, 0x21, 0xb2, 0x46, 0xc2, 0x9d, 0x85, 0x97, 0xc4, 0x80, 0x5e, 0xad,
-	0x1d, 0xad, 0x2d, 0x7c, 0xd4, 0xce, 0xd9, 0xff, 0xac, 0x08, 0x63, 0x22, 0xb4, 0x93, 0xb8, 0x03,
-	0x5e, 0x34, 0x96, 0xc4, 0x63, 0xa9, 0x25, 0x31, 0x65, 0x20, 0x1f, 0xcf, 0xf1, 0x1f, 0xc1, 0x14,
-	0x3d, 0x9c, 0xaf, 0x13, 0x27, 0x8c, 0xd7, 0x89, 0xc3, 0x2d, 0xae, 0x8a, 0x47, 0x3e, 0xfd, 0x95,
-	0x60, 0xed, 0x66, 0x9a, 0x18, 0xee, 0xa4, 0xff, 0x8d, 0x74, 0xe7, 0xf8, 0x30, 0xd9, 0x11, 0x9d,
-	0xeb, 0x2d, 0x28, 0x2b, 0x0f, 0x0d, 0x71, 0xe8, 0x74, 0x0f, 0x72, 0x97, 0xa6, 0xc0, 0x85, 0x5f,
-	0x89, 0x77, 0x50, 0x42, 0xce, 0xfe, 0xfb, 0x05, 0xa3, 0x41, 0x3e, 0x89, 0xab, 0x50, 0x72, 0xa2,
-	0xc8, 0xdb, 0xf4, 0x89, 0x2b, 0x76, 0xec, 0x47, 0xf3, 0x76, 0xac, 0xd1, 0x0c, 0xf3, 0x92, 0x99,
-	0x17, 0x35, 0xb1, 0xa2, 0x81, 0xae, 0x73, 0xbb, 0xb6, 0x5d, 0xf9, 0x52, 0xeb, 0x8f, 0x1a, 0x48,
-	0xcb, 0xb7, 0x5d, 0x82, 0x45, 0x7d, 0xf4, 0x59, 0x6e, 0x78, 0x78, 0xc3, 0x0f, 0xf6, 0xfc, 0x6b,
-	0x41, 0x20, 0xc3, 0x27, 0xf4, 0x47, 0x70, 0x4a, 0x9a, 0x1b, 0xaa, 0xea, 0xd8, 0xa4, 0xd6, 0x5f,
-	0x04, 0xcb, 0x6f, 0x83, 0x53, 0x94, 0xb4, 0xe9, 0xdd, 0x1c, 0x21, 0x02, 0x13, 0x22, 0x6e, 0x98,
-	0x2c, 0x13, 0x63, 0x97, 0xf9, 0x08, 0x33, 0x6b, 0x27, 0x12, 0xe0, 0x1b, 0x26, 0x09, 0x9c, 0xa6,
-	0x69, 0xff, 0xa4, 0x05, 0xcc, 0xd3, 0xf3, 0x04, 0xf8, 0x91, 0x4f, 0x99, 0xfc, 0xc8, 0x74, 0xde,
-	0x20, 0xe7, 0xb0, 0x22, 0x2f, 0xf0, 0x95, 0x55, 0x0b, 0x83, 0x7b, 0xfb, 0xc2, 0xe8, 0xa3, 0xf7,
-	0xfb, 0xc3, 0xfe, 0x5f, 0x16, 0x3f, 0xc4, 0x94, 0xff, 0x04, 0xfa, 0x76, 0x28, 0x35, 0x9c, 0x96,
-	0xd3, 0xe0, 0xb9, 0x52, 0x72, 0x65, 0x71, 0x46, 0xa5, 0xb9, 0x45, 0x51, 0x83, 0xcb, 0x96, 0x64,
-	0xfc, 0xb9, 0x92, 0x2c, 0xee, 0x29, 0x4f, 0x52, 0x4d, 0xce, 0x6c, 0xc3, 0x98, 0x41, 0xec, 0xa1,
-	0x0a, 0x22, 0xbe, 0x9d, 0x5f, 0xb1, 0x2a, 0x5e, 0xe2, 0x0e, 0x4c, 0xf9, 0xda, 0x7f, 0x7a, 0xa1,
-	0xc8, 0xc7, 0xe5, 0x47, 0x7b, 0x5d, 0xa2, 0xec, 0xf6, 0xd1, 0xfc, 0x4e, 0x53, 0x64, 0x70, 0x27,
-	0x65, 0xfb, 0xc7, 0x2d, 0x78, 0x44, 0x47, 0xd4, 0x5c, 0x5b, 0x7a, 0x49, 0xf7, 0x2b, 0x50, 0x0a,
-	0x5a, 0x24, 0x74, 0xe2, 0x20, 0x14, 0xb7, 0xc6, 0x15, 0x39, 0xe8, 0xb7, 0x44, 0xf9, 0xa1, 0x88,
-	0x34, 0x2e, 0xa9, 0xcb, 0x72, 0xac, 0x6a, 0xd2, 0xd7, 0x27, 0x1b, 0x8c, 0x48, 0x38, 0x31, 0xb1,
-	0x33, 0x80, 0x29, 0xba, 0x23, 0x2c, 0x20, 0xf6, 0xd7, 0x2c, 0xbe, 0xb0, 0xf4, 0xae, 0xa3, 0x77,
-	0x61, 0x72, 0xc7, 0x89, 0x1b, 0x5b, 0x4b, 0xf7, 0x5a, 0x21, 0xd7, 0x95, 0xc8, 0x71, 0x7a, 0xaa,
-	0xd7, 0x38, 0x69, 0x1f, 0x99, 0xd8, 0x52, 0xae, 0xa4, 0x88, 0xe1, 0x0e, 0xf2, 0x68, 0x1d, 0x46,
-	0x58, 0x19, 0xf3, 0xcf, 0x8b, 0xba, 0xb1, 0x06, 0x79, 0xad, 0x29, 0x5b, 0x81, 0x95, 0x84, 0x0e,
-	0xd6, 0x89, 0xda, 0x3f, 0x53, 0xe4, 0xbb, 0x9d, 0xb1, 0xf2, 0x4f, 0xc2, 0x70, 0x2b, 0x70, 0x17,
-	0xab, 0x15, 0x2c, 0x66, 0x41, 0x5d, 0x23, 0x35, 0x5e, 0x8c, 0x25, 0x1c, 0x5d, 0x81, 0x92, 0xf8,
-	0x29, 0x75, 0x5b, 0xec, 0x6c, 0x16, 0x78, 0x11, 0x56, 0x50, 0xf4, 0x1c, 0x40, 0x2b, 0x0c, 0x76,
-	0x3d, 0x97, 0x05, 0x81, 0x28, 0x9a, 0x66, 0x3e, 0x35, 0x05, 0xc1, 0x1a, 0x16, 0x7a, 0x15, 0xc6,
-	0xda, 0x7e, 0xc4, 0xd9, 0x11, 0x67, 0x5d, 0x04, 0xe5, 0x2e, 0x25, 0x06, 0x28, 0xb7, 0x75, 0x20,
-	0x36, 0x71, 0xd1, 0x3c, 0x0c, 0xc5, 0x0e, 0x33, 0x5b, 0x19, 0xcc, 0xb7, 0xb7, 0x5d, 0xa3, 0x18,
-	0x7a, 0x5a, 0x0e, 0x5a, 0x01, 0x8b, 0x8a, 0xe8, 0x2d, 0xe9, 0x2a, 0xcb, 0x0f, 0x76, 0x61, 0xe8,
-	0xde, 0xdf, 0x25, 0xa0, 0x39, 0xca, 0x0a, 0x03, 0x7a, 0x83, 0x16, 0x7a, 0x05, 0x80, 0xdc, 0x8b,
-	0x49, 0xe8, 0x3b, 0x4d, 0x65, 0x15, 0xa6, 0xf8, 0x82, 0x4a, 0xb0, 0x1a, 0xc4, 0xb7, 0x23, 0xb2,
-	0xa4, 0x30, 0xb0, 0x86, 0x6d, 0xff, 0x66, 0x19, 0x20, 0xe1, 0xdb, 0xd1, 0x7b, 0x1d, 0x07, 0xd7,
-	0xd3, 0xdd, 0x39, 0xfd, 0xe3, 0x3b, 0xb5, 0xd0, 0xf7, 0x58, 0x30, 0xe2, 0x34, 0x9b, 0x41, 0xc3,
-	0x89, 0xd9, 0x0c, 0x15, 0xba, 0x1f, 0x9c, 0xa2, 0xfd, 0xf9, 0xa4, 0x06, 0xef, 0xc2, 0xf3, 0x72,
-	0x85, 0x6a, 0x90, 0x9e, 0xbd, 0xd0, 0x1b, 0x46, 0x9f, 0x90, 0x4f, 0xc5, 0xa2, 0x31, 0x94, 0xea,
-	0xa9, 0x58, 0x66, 0x77, 0x84, 0xfe, 0x4a, 0xbc, 0x6d, 0xbc, 0x12, 0x07, 0xf2, 0x7d, 0x01, 0x0d,
-	0xf6, 0xb5, 0xd7, 0x03, 0x11, 0xd5, 0xf4, 0xb8, 0x00, 0x83, 0xf9, 0x8e, 0x77, 0xda, 0x3b, 0xa9,
-	0x47, 0x4c, 0x80, 0x77, 0x60, 0xc2, 0x35, 0x99, 0x00, 0xb1, 0x12, 0x9f, 0xc8, 0xa3, 0x9b, 0xe2,
-	0x19, 0x92, 0x6b, 0x3f, 0x05, 0xc0, 0x69, 0xc2, 0xa8, 0xc6, 0x63, 0x3e, 0x54, 0xfd, 0x8d, 0x40,
-	0x38, 0x5b, 0xd8, 0xb9, 0x73, 0xb9, 0x1f, 0xc5, 0x64, 0x87, 0x62, 0x26, 0xb7, 0xfb, 0xaa, 0xa8,
-	0x8b, 0x15, 0x15, 0xf4, 0x06, 0x0c, 0x31, 0xcf, 0xab, 0x68, 0xba, 0x94, 0x2f, 0x2b, 0x36, 0x83,
-	0x98, 0x25, 0x1b, 0x92, 0xfd, 0x8d, 0xb0, 0xa0, 0x80, 0xae, 0x4b, 0xbf, 0xc6, 0xa8, 0xea, 0xdf,
-	0x8e, 0x08, 0xf3, 0x6b, 0x2c, 0x2f, 0x7c, 0x34, 0x71, 0x59, 0xe4, 0xe5, 0x99, 0xc9, 0xbb, 0x8c,
-	0x9a, 0x94, 0x8b, 0x12, 0xff, 0x65, 0x4e, 0xb0, 0x69, 0xc8, 0xef, 0x9e, 0x99, 0x37, 0x2c, 0x19,
-	0xce, 0x3b, 0x26, 0x09, 0x9c, 0xa6, 0x49, 0x39, 0x52, 0xbe, 0xeb, 0x85, 0xbb, 0x46, 0xaf, 0xb3,
-	0x83, 0x3f, 0xc4, 0xd9, 0x6d, 0xc4, 0x4b, 0xb0, 0xa8, 0x7f, 0xa2, 0xec, 0xc1, 0x8c, 0x0f, 0x93,
-	0xe9, 0x2d, 0xfa, 0x50, 0xd9, 0x91, 0xdf, 0x1f, 0x80, 0x71, 0x73, 0x49, 0xa1, 0xab, 0x50, 0x16,
-	0x44, 0x54, 0x1c, 0x7f, 0xb5, 0x4b, 0x56, 0x24, 0x00, 0x27, 0x38, 0x2c, 0x7d, 0x03, 0xab, 0xae,
-	0x99, 0xd9, 0x26, 0xe9, 0x1b, 0x14, 0x04, 0x6b, 0x58, 0xf4, 0x61, 0xb5, 0x1e, 0x04, 0xb1, 0xba,
-	0x90, 0xd4, 0xba, 0x5b, 0x60, 0xa5, 0x58, 0x40, 0xe9, 0x45, 0xb4, 0x4d, 0x42, 0x9f, 0x34, 0xcd,
-	0xf0, 0xc0, 0xea, 0x22, 0xba, 0xa1, 0x03, 0xb1, 0x89, 0x4b, 0xaf, 0xd3, 0x20, 0x62, 0x0b, 0x59,
-	0x3c, 0xdf, 0x12, 0xb3, 0xe5, 0x3a, 0x77, 0xad, 0x96, 0x70, 0xf4, 0x19, 0x78, 0x44, 0x85, 0x40,
-	0xc2, 0x5c, 0x0f, 0x21, 0x5b, 0x1c, 0x32, 0xa4, 0x2d, 0x8f, 0x2c, 0x66, 0xa3, 0xe1, 0xbc, 0xfa,
-	0xe8, 0x75, 0x18, 0x17, 0x2c, 0xbe, 0xa4, 0x38, 0x6c, 0x9a, 0xc6, 0xdc, 0x30, 0xa0, 0x38, 0x85,
-	0x2d, 0x03, 0x1c, 0x33, 0x2e, 0x5b, 0x52, 0x28, 0x75, 0x06, 0x38, 0xd6, 0xe1, 0xb8, 0xa3, 0x06,
-	0x9a, 0x87, 0x09, 0xce, 0x83, 0x79, 0xfe, 0x26, 0x9f, 0x13, 0xe1, 0x4d, 0xa5, 0xb6, 0xd4, 0x2d,
-	0x13, 0x8c, 0xd3, 0xf8, 0xe8, 0x65, 0x18, 0x75, 0xc2, 0xc6, 0x96, 0x17, 0x93, 0x46, 0xdc, 0x0e,
-	0xb9, 0x9b, 0x95, 0x66, 0x5b, 0x34, 0xaf, 0xc1, 0xb0, 0x81, 0x69, 0xbf, 0x07, 0xa7, 0x32, 0x62,
-	0x2e, 0xd0, 0x85, 0xe3, 0xb4, 0x3c, 0xf9, 0x4d, 0x29, 0x03, 0xe4, 0xf9, 0x5a, 0x55, 0x7e, 0x8d,
-	0x86, 0x45, 0x57, 0x27, 0x8b, 0xcd, 0xa0, 0xa5, 0x00, 0x54, 0xab, 0x73, 0x59, 0x02, 0x70, 0x82,
-	0x63, 0xff, 0xf7, 0x02, 0x4c, 0x64, 0xe8, 0x56, 0x58, 0x1a, 0xba, 0xd4, 0x23, 0x25, 0xc9, 0x3a,
-	0x67, 0xc6, 0xcb, 0x2e, 0x1c, 0x21, 0x5e, 0x76, 0xb1, 0x57, 0xbc, 0xec, 0x81, 0xf7, 0x13, 0x2f,
-	0xdb, 0x1c, 0xb1, 0xc1, 0xbe, 0x46, 0x2c, 0x23, 0xc6, 0xf6, 0xd0, 0x11, 0x63, 0x6c, 0x1b, 0x83,
-	0x3e, 0xdc, 0xc7, 0xa0, 0xff, 0x50, 0x01, 0x26, 0xd3, 0x36, 0x90, 0x27, 0x20, 0xb7, 0x7d, 0xc3,
-	0x90, 0xdb, 0x66, 0x27, 0x75, 0x4c, 0x5b, 0x66, 0xe6, 0xc9, 0x70, 0x71, 0x4a, 0x86, 0xfb, 0xf1,
-	0xbe, 0xa8, 0x75, 0x97, 0xe7, 0xfe, 0xad, 0x02, 0x9c, 0x49, 0x57, 0x59, 0x6c, 0x3a, 0xde, 0xce,
-	0x09, 0x8c, 0xcd, 0x2d, 0x63, 0x6c, 0x9e, 0xe9, 0xe7, 0x6b, 0x58, 0xd7, 0x72, 0x07, 0xe8, 0x6e,
-	0x6a, 0x80, 0xae, 0xf6, 0x4f, 0xb2, 0xfb, 0x28, 0x7d, 0xb5, 0x08, 0x17, 0x33, 0xeb, 0x25, 0x62,
-	0xcf, 0x65, 0x43, 0xec, 0xf9, 0x5c, 0x4a, 0xec, 0x69, 0x77, 0xaf, 0x7d, 0x3c, 0x72, 0x50, 0xe1,
-	0x21, 0xcb, 0x02, 0x08, 0x3c, 0xa0, 0x0c, 0xd4, 0xf0, 0x90, 0x55, 0x84, 0xb0, 0x49, 0xf7, 0x1b,
-	0x49, 0xf6, 0xf9, 0xaf, 0x2c, 0x38, 0x97, 0x39, 0x37, 0x27, 0x20, 0xeb, 0x5a, 0x35, 0x65, 0x5d,
-	0x4f, 0xf6, 0xbd, 0x5a, 0x73, 0x84, 0x5f, 0xbf, 0x3e, 0x90, 0xf3, 0x2d, 0xec, 0x25, 0x7f, 0x0b,
-	0x46, 0x9c, 0x46, 0x83, 0x44, 0xd1, 0x4a, 0xe0, 0xaa, 0x90, 0xc0, 0xcf, 0xb0, 0x77, 0x56, 0x52,
-	0x7c, 0x78, 0x30, 0x3b, 0x93, 0x26, 0x91, 0x80, 0xb1, 0x4e, 0x01, 0x7d, 0x16, 0x4a, 0x91, 0xb8,
-	0x37, 0xc5, 0xdc, 0x3f, 0xdf, 0xe7, 0xe0, 0x38, 0xeb, 0xa4, 0x69, 0x86, 0x39, 0x52, 0x92, 0x0a,
-	0x45, 0xd2, 0x0c, 0x89, 0x52, 0x38, 0xd6, 0x90, 0x28, 0xcf, 0x01, 0xec, 0xaa, 0xc7, 0x40, 0x5a,
-	0xfe, 0xa0, 0x3d, 0x13, 0x34, 0x2c, 0xf4, 0x2d, 0x30, 0x19, 0xf1, 0xa0, 0x7e, 0x8b, 0x4d, 0x27,
-	0x62, 0x6e, 0x2e, 0x62, 0x15, 0xb2, 0x50, 0x4a, 0xf5, 0x14, 0x0c, 0x77, 0x60, 0xa3, 0x65, 0xd9,
-	0x2a, 0x8b, 0x40, 0xc8, 0x17, 0xe6, 0xe5, 0xa4, 0x45, 0x91, 0x04, 0xf7, 0x74, 0x7a, 0xf8, 0xd9,
-	0xc0, 0x6b, 0x35, 0xd1, 0x67, 0x01, 0xe8, 0xf2, 0x11, 0x72, 0x88, 0xe1, 0xfc, 0xc3, 0x93, 0x9e,
-	0x2a, 0x6e, 0xa6, 0x55, 0x2e, 0xf3, 0x4d, 0xad, 0x28, 0x22, 0x58, 0x23, 0x68, 0xff, 0xd0, 0x00,
-	0x3c, 0xda, 0xe5, 0x8c, 0x44, 0xf3, 0xa6, 0x1e, 0xf6, 0xa9, 0xf4, 0xe3, 0x7a, 0x26, 0xb3, 0xb2,
-	0xf1, 0xda, 0x4e, 0x2d, 0xc5, 0xc2, 0xfb, 0x5e, 0x8a, 0xdf, 0x6f, 0x69, 0x62, 0x0f, 0x6e, 0xab,
-	0xf9, 0xa9, 0x23, 0x9e, 0xfd, 0xc7, 0x28, 0x07, 0xd9, 0xc8, 0x10, 0x26, 0x3c, 0xd7, 0x77, 0x77,
-	0xfa, 0x96, 0x2e, 0x9c, 0xac, 0x94, 0xf8, 0x0b, 0x16, 0x3c, 0x96, 0xd9, 0x5f, 0xc3, 0x22, 0xe7,
-	0x2a, 0x94, 0x1b, 0xb4, 0x50, 0x73, 0x45, 0x4c, 0x7c, 0xb4, 0x25, 0x00, 0x27, 0x38, 0x86, 0xe1,
-	0x4d, 0xa1, 0xa7, 0xe1, 0xcd, 0x3f, 0xb5, 0xa0, 0x63, 0x7f, 0x9c, 0xc0, 0x41, 0x5d, 0x35, 0x0f,
-	0xea, 0x8f, 0xf6, 0x33, 0x97, 0x39, 0x67, 0xf4, 0x1f, 0x4d, 0xc0, 0xd9, 0x1c, 0x57, 0x9c, 0x5d,
-	0x98, 0xda, 0x6c, 0x10, 0xd3, 0xc9, 0x53, 0x7c, 0x4c, 0xa6, 0x3f, 0x6c, 0x57, 0x8f, 0x50, 0x96,
-	0xd1, 0x72, 0xaa, 0x03, 0x05, 0x77, 0x36, 0x81, 0xbe, 0x60, 0xc1, 0x69, 0x67, 0x2f, 0xea, 0x48,
-	0x81, 0x2f, 0xd6, 0xcc, 0x0b, 0x99, 0x42, 0x90, 0x1e, 0x29, 0xf3, 0x79, 0x8a, 0xcf, 0x2c, 0x2c,
-	0x9c, 0xd9, 0x16, 0xc2, 0x22, 0x48, 0x3c, 0x65, 0xe7, 0xbb, 0xb8, 0x21, 0x67, 0xf9, 0x4c, 0xf1,
-	0x1b, 0x44, 0x42, 0xb0, 0xa2, 0x83, 0x3e, 0x0f, 0xe5, 0x4d, 0xe9, 0xc8, 0x98, 0x71, 0x43, 0x25,
-	0x03, 0xd9, 0xdd, 0xbd, 0x93, 0x6b, 0x32, 0x15, 0x12, 0x4e, 0x88, 0xa2, 0xd7, 0xa1, 0xe8, 0x6f,
-	0x44, 0xdd, 0xb2, 0x64, 0xa6, 0x4c, 0xd6, 0xb8, 0xb3, 0xff, 0xea, 0x72, 0x1d, 0xd3, 0x8a, 0xe8,
-	0x3a, 0x14, 0xc3, 0x75, 0x57, 0x48, 0xf0, 0x32, 0xcf, 0x70, 0xbc, 0x50, 0xc9, 0xe9, 0x15, 0xa3,
-	0x84, 0x17, 0x2a, 0x98, 0x92, 0x40, 0x35, 0x18, 0x64, 0xfe, 0x2b, 0xe2, 0x3e, 0xc8, 0xe4, 0x7c,
-	0xbb, 0xf8, 0x81, 0xf1, 0x88, 0x00, 0x0c, 0x01, 0x73, 0x42, 0x68, 0x0d, 0x86, 0x1a, 0x2c, 0xa3,
-	0xa2, 0x88, 0x47, 0xf6, 0x89, 0x4c, 0x59, 0x5d, 0x97, 0x54, 0x93, 0x42, 0x74, 0xc5, 0x30, 0xb0,
-	0xa0, 0xc5, 0xa8, 0x92, 0xd6, 0xd6, 0x46, 0x24, 0x32, 0x00, 0x67, 0x53, 0xed, 0x92, 0x41, 0x55,
-	0x50, 0x65, 0x18, 0x58, 0xd0, 0x42, 0xaf, 0x40, 0x61, 0xa3, 0x21, 0x7c, 0x53, 0x32, 0x85, 0x76,
-	0x66, 0xbc, 0x86, 0x85, 0xa1, 0xfb, 0x07, 0xb3, 0x85, 0xe5, 0x45, 0x5c, 0xd8, 0x68, 0xa0, 0x55,
-	0x18, 0xde, 0xe0, 0x1e, 0xde, 0x42, 0x2e, 0xf7, 0x44, 0xb6, 0xf3, 0x79, 0x87, 0x13, 0x38, 0x77,
-	0xcb, 0x10, 0x00, 0x2c, 0x89, 0xb0, 0x98, 0xeb, 0xca, 0x53, 0x5d, 0x84, 0xee, 0x9a, 0x3b, 0x5a,
-	0x74, 0x01, 0x7e, 0x3f, 0x27, 0xfe, 0xee, 0x58, 0xa3, 0x48, 0x57, 0xb5, 0x23, 0xd3, 0xb0, 0x8b,
-	0x50, 0x2c, 0x99, 0xab, 0xba, 0x47, 0x86, 0x7a, 0xbe, 0xaa, 0x15, 0x12, 0x4e, 0x88, 0xa2, 0x6d,
-	0x18, 0xdb, 0x8d, 0x5a, 0x5b, 0x44, 0x6e, 0x69, 0x16, 0x99, 0x25, 0xe7, 0x0a, 0xbb, 0x23, 0x10,
-	0xbd, 0x30, 0x6e, 0x3b, 0xcd, 0x8e, 0x53, 0x88, 0xa9, 0xbf, 0xef, 0xe8, 0xc4, 0xb0, 0x49, 0x9b,
-	0x0e, 0xff, 0xbb, 0xed, 0x60, 0x7d, 0x3f, 0x26, 0x22, 0xe2, 0x56, 0xe6, 0xf0, 0xbf, 0xc9, 0x51,
-	0x3a, 0x87, 0x5f, 0x00, 0xb0, 0x24, 0x82, 0xee, 0x88, 0xe1, 0x61, 0xa7, 0xe7, 0x64, 0x7e, 0x58,
-	0xcc, 0x79, 0x89, 0x94, 0x33, 0x28, 0xec, 0xb4, 0x4c, 0x48, 0xb1, 0x53, 0xb2, 0xb5, 0x15, 0xc4,
-	0x81, 0x9f, 0x3a, 0xa1, 0xa7, 0xf2, 0x4f, 0xc9, 0x5a, 0x06, 0x7e, 0xe7, 0x29, 0x99, 0x85, 0x85,
-	0x33, 0xdb, 0x42, 0x2e, 0x8c, 0xb7, 0x82, 0x30, 0xde, 0x0b, 0x42, 0xb9, 0xbe, 0x50, 0x17, 0xb9,
-	0x82, 0x81, 0x29, 0x5a, 0x64, 0xc1, 0xec, 0x4c, 0x08, 0x4e, 0xd1, 0x44, 0x9f, 0x86, 0xe1, 0xa8,
-	0xe1, 0x34, 0x49, 0xf5, 0xd6, 0xf4, 0xa9, 0xfc, 0xeb, 0xa7, 0xce, 0x51, 0x72, 0x56, 0x17, 0x0f,
-	0xd0, 0xce, 0x51, 0xb0, 0x24, 0x87, 0x96, 0x61, 0x90, 0xe5, 0xd4, 0x62, 0xe1, 0xe1, 0x72, 0xa2,
-	0x7b, 0x76, 0x18, 0x10, 0xf3, 0xb3, 0x89, 0x15, 0x63, 0x5e, 0x9d, 0xee, 0x01, 0xc1, 0x5e, 0x07,
-	0xd1, 0xf4, 0x99, 0xfc, 0x3d, 0x20, 0xb8, 0xf2, 0x5b, 0xf5, 0x6e, 0x7b, 0x40, 0x21, 0xe1, 0x84,
-	0x28, 0x3d, 0x99, 0xe9, 0x69, 0x7a, 0xb6, 0x8b, 0xe5, 0x4b, 0xee, 0x59, 0xca, 0x4e, 0x66, 0x7a,
-	0x92, 0x52, 0x12, 0xf6, 0xef, 0x0e, 0x77, 0xf2, 0x2c, 0xec, 0x41, 0xf6, 0x5d, 0x56, 0x87, 0xae,
-	0xee, 0x93, 0xfd, 0xca, 0x87, 0x8e, 0x91, 0x5b, 0xfd, 0x82, 0x05, 0x67, 0x5b, 0x99, 0x1f, 0x22,
-	0x18, 0x80, 0xfe, 0xc4, 0x4c, 0xfc, 0xd3, 0x55, 0x28, 0xc1, 0x6c, 0x38, 0xce, 0x69, 0x29, 0xfd,
-	0x22, 0x28, 0xbe, 0xef, 0x17, 0xc1, 0x0a, 0x94, 0x18, 0x93, 0xd9, 0x23, 0xc3, 0x70, 0xfa, 0x61,
-	0xc4, 0x58, 0x89, 0x45, 0x51, 0x11, 0x2b, 0x12, 0xe8, 0x07, 0x2c, 0xb8, 0x90, 0xee, 0x3a, 0x26,
-	0x0c, 0x2c, 0xe2, 0x0f, 0xf2, 0xb7, 0xe0, 0xb2, 0xf8, 0xfe, 0x0b, 0xb5, 0x6e, 0xc8, 0x87, 0xbd,
-	0x10, 0x70, 0xf7, 0xc6, 0x50, 0x25, 0xe3, 0x31, 0x3a, 0x64, 0x0a, 0xe0, 0xfb, 0x78, 0x90, 0xbe,
-	0x00, 0xa3, 0x3b, 0x41, 0xdb, 0x8f, 0x85, 0xa1, 0x8c, 0x50, 0xda, 0x33, 0x65, 0xf5, 0x8a, 0x56,
-	0x8e, 0x0d, 0xac, 0xd4, 0x33, 0xb6, 0xf4, 0xc0, 0xcf, 0xd8, 0xb7, 0x61, 0xd4, 0xd7, 0x2c, 0x3b,
-	0x05, 0x3f, 0x70, 0x39, 0x3f, 0x76, 0xa8, 0x6e, 0x07, 0xca, 0x7b, 0xa9, 0x97, 0x60, 0x83, 0xda,
-	0xc9, 0xbe, 0x8d, 0x7e, 0xda, 0xca, 0x60, 0xea, 0xf9, 0x6b, 0xf9, 0x35, 0xf3, 0xb5, 0x7c, 0x39,
-	0xfd, 0x5a, 0xee, 0x10, 0xbe, 0x1a, 0x0f, 0xe5, 0xfe, 0xf3, 0x9c, 0xf4, 0x1b, 0x26, 0xd0, 0x6e,
-	0xc2, 0xa5, 0x5e, 0xd7, 0x12, 0xb3, 0x98, 0x72, 0x95, 0xaa, 0x2d, 0xb1, 0x98, 0x72, 0xab, 0x15,
-	0xcc, 0x20, 0xfd, 0xc6, 0x91, 0xb1, 0xff, 0x9b, 0x05, 0xc5, 0x5a, 0xe0, 0x9e, 0x80, 0x30, 0xf9,
-	0x53, 0x86, 0x30, 0xf9, 0xd1, 0xec, 0x0b, 0xd1, 0xcd, 0x15, 0x1d, 0x2f, 0xa5, 0x44, 0xc7, 0x17,
-	0xf2, 0x08, 0x74, 0x17, 0x14, 0xff, 0x44, 0x11, 0x46, 0x6a, 0x81, 0xab, 0xcc, 0x95, 0x7f, 0xfd,
-	0x41, 0xcc, 0x95, 0x73, 0x03, 0xfc, 0x6b, 0x94, 0x99, 0xa1, 0x95, 0xf4, 0xb1, 0xfc, 0x0b, 0x66,
-	0xb5, 0x7c, 0x97, 0x78, 0x9b, 0x5b, 0x31, 0x71, 0xd3, 0x9f, 0x73, 0x72, 0x56, 0xcb, 0xff, 0xd5,
-	0x82, 0x89, 0x54, 0xeb, 0xa8, 0x09, 0x63, 0x4d, 0x5d, 0x30, 0x29, 0xd6, 0xe9, 0x03, 0xc9, 0x34,
-	0x85, 0xd5, 0xa7, 0x56, 0x84, 0x4d, 0xe2, 0x68, 0x0e, 0x40, 0x69, 0xea, 0xa4, 0x04, 0x8c, 0x71,
-	0xfd, 0x4a, 0x95, 0x17, 0x61, 0x0d, 0x03, 0xbd, 0x08, 0x23, 0x71, 0xd0, 0x0a, 0x9a, 0xc1, 0xe6,
-	0xfe, 0x0d, 0x22, 0x23, 0x17, 0x29, 0x5b, 0xae, 0xb5, 0x04, 0x84, 0x75, 0x3c, 0xfb, 0xa7, 0x8a,
-	0xfc, 0x43, 0xfd, 0xd8, 0xfb, 0x70, 0x4d, 0x7e, 0xb0, 0xd7, 0xe4, 0x57, 0x2d, 0x98, 0xa4, 0xad,
-	0x33, 0x73, 0x11, 0x79, 0xd9, 0xaa, 0x98, 0xc1, 0x56, 0x97, 0x98, 0xc1, 0x97, 0xe9, 0xd9, 0xe5,
-	0x06, 0xed, 0x58, 0x48, 0xd0, 0xb4, 0xc3, 0x89, 0x96, 0x62, 0x01, 0x15, 0x78, 0x24, 0x0c, 0x85,
-	0x8b, 0x9b, 0x8e, 0x47, 0xc2, 0x10, 0x0b, 0xa8, 0x0c, 0x29, 0x3c, 0x90, 0x1d, 0x52, 0x98, 0xc7,
-	0x61, 0x14, 0x86, 0x05, 0x82, 0xed, 0xd1, 0xe2, 0x30, 0x4a, 0x8b, 0x83, 0x04, 0xc7, 0xfe, 0xf9,
-	0x22, 0x8c, 0xd6, 0x02, 0x37, 0xd1, 0x95, 0xbd, 0x60, 0xe8, 0xca, 0x2e, 0xa5, 0x74, 0x65, 0x93,
-	0x3a, 0xee, 0x87, 0x9a, 0xb1, 0xaf, 0x97, 0x66, 0xec, 0x9f, 0x58, 0x6c, 0xd6, 0x2a, 0xab, 0x75,
-	0x6e, 0x7d, 0x84, 0x9e, 0x85, 0x11, 0x76, 0x20, 0x31, 0x9f, 0x4a, 0xa9, 0x40, 0x62, 0x29, 0x94,
-	0x56, 0x93, 0x62, 0xac, 0xe3, 0xa0, 0x2b, 0x50, 0x8a, 0x88, 0x13, 0x36, 0xb6, 0xd4, 0x19, 0x27,
-	0xb4, 0x3d, 0xbc, 0x0c, 0x2b, 0x28, 0x7a, 0x33, 0x09, 0x01, 0x58, 0xcc, 0xf7, 0xd1, 0xd2, 0xfb,
-	0xc3, 0xb7, 0x48, 0x7e, 0xdc, 0x3f, 0xfb, 0x2e, 0xa0, 0x4e, 0xfc, 0x3e, 0x62, 0x5f, 0xcd, 0x9a,
-	0xb1, 0xaf, 0xca, 0x1d, 0x71, 0xaf, 0xfe, 0xcc, 0x82, 0xf1, 0x5a, 0xe0, 0xd2, 0xad, 0xfb, 0x8d,
-	0xb4, 0x4f, 0xf5, 0xf8, 0xa7, 0x43, 0x5d, 0xe2, 0x9f, 0x3e, 0x0e, 0x83, 0xb5, 0xc0, 0xad, 0xd6,
-	0xba, 0xf9, 0x36, 0xdb, 0x7f, 0xdb, 0x82, 0xe1, 0x5a, 0xe0, 0x9e, 0x80, 0x70, 0xfe, 0x35, 0x53,
-	0x38, 0xff, 0x48, 0xce, 0xba, 0xc9, 0x91, 0xc7, 0xff, 0xcd, 0x01, 0x18, 0xa3, 0xfd, 0x0c, 0x36,
-	0xe5, 0x54, 0x1a, 0xc3, 0x66, 0xf5, 0x31, 0x6c, 0x94, 0x17, 0x0e, 0x9a, 0xcd, 0x60, 0x2f, 0x3d,
-	0xad, 0xcb, 0xac, 0x14, 0x0b, 0x28, 0x7a, 0x1a, 0x4a, 0xad, 0x90, 0xec, 0x7a, 0x81, 0x60, 0x32,
-	0x35, 0x55, 0x47, 0x4d, 0x94, 0x63, 0x85, 0x41, 0x1f, 0x67, 0x91, 0xe7, 0x37, 0x48, 0x9d, 0x34,
-	0x02, 0xdf, 0xe5, 0xf2, 0xeb, 0xa2, 0x48, 0x1b, 0xa0, 0x95, 0x63, 0x03, 0x0b, 0xdd, 0x85, 0x32,
-	0xfb, 0xcf, 0x8e, 0x9d, 0xa3, 0x67, 0x93, 0x14, 0xd9, 0xc5, 0x04, 0x01, 0x9c, 0xd0, 0x42, 0xcf,
-	0x01, 0xc4, 0x32, 0x42, 0x76, 0x24, 0xe2, 0x1c, 0x29, 0x86, 0x5c, 0xc5, 0xce, 0x8e, 0xb0, 0x86,
-	0x85, 0x9e, 0x82, 0x72, 0xec, 0x78, 0xcd, 0x9b, 0x9e, 0x4f, 0x22, 0x26, 0x97, 0x2e, 0xca, 0x24,
-	0x5f, 0xa2, 0x10, 0x27, 0x70, 0xca, 0x10, 0xb1, 0x20, 0x00, 0x3c, 0x17, 0x6d, 0x89, 0x61, 0x33,
-	0x86, 0xe8, 0xa6, 0x2a, 0xc5, 0x1a, 0x06, 0xda, 0x82, 0xf3, 0x9e, 0xcf, 0x42, 0xec, 0x93, 0xfa,
-	0xb6, 0xd7, 0x5a, 0xbb, 0x59, 0xbf, 0x43, 0x42, 0x6f, 0x63, 0x7f, 0xc1, 0x69, 0x6c, 0x13, 0x5f,
-	0xe6, 0x09, 0xfc, 0xa8, 0xe8, 0xe2, 0xf9, 0x6a, 0x17, 0x5c, 0xdc, 0x95, 0x92, 0xfd, 0x32, 0x9c,
-	0xa9, 0x05, 0x6e, 0x2d, 0x08, 0xe3, 0xe5, 0x20, 0xdc, 0x73, 0x42, 0x57, 0xae, 0x94, 0x59, 0x99,
-	0x85, 0x84, 0x1e, 0x85, 0x83, 0xfc, 0xa0, 0x30, 0x72, 0x61, 0x3d, 0xcf, 0x98, 0xaf, 0x23, 0x3a,
-	0xa3, 0x34, 0x18, 0x1b, 0xa0, 0xf2, 0x4d, 0x5c, 0x73, 0x62, 0x82, 0x6e, 0xb1, 0xa4, 0xb8, 0xc9,
-	0x8d, 0x28, 0xaa, 0x3f, 0xa9, 0x25, 0xc5, 0x4d, 0x80, 0x99, 0x57, 0xa8, 0x59, 0xdf, 0xfe, 0xd9,
-	0x01, 0x76, 0x38, 0xa6, 0x72, 0x16, 0xa0, 0xcf, 0xc1, 0x78, 0x44, 0x6e, 0x7a, 0x7e, 0xfb, 0x9e,
-	0x94, 0x09, 0x74, 0x71, 0x27, 0xaa, 0x2f, 0xe9, 0x98, 0x5c, 0xb2, 0x68, 0x96, 0xe1, 0x14, 0x35,
-	0xb4, 0x03, 0xe3, 0x7b, 0x9e, 0xef, 0x06, 0x7b, 0x91, 0xa4, 0x5f, 0xca, 0x17, 0x30, 0xde, 0xe5,
-	0x98, 0xa9, 0x3e, 0x1a, 0xcd, 0xdd, 0x35, 0x88, 0xe1, 0x14, 0x71, 0xba, 0x00, 0xc3, 0xb6, 0x3f,
-	0x1f, 0xdd, 0x8e, 0x48, 0x28, 0xd2, 0x1b, 0xb3, 0x05, 0x88, 0x65, 0x21, 0x4e, 0xe0, 0x74, 0x01,
-	0xb2, 0x3f, 0xd7, 0xc2, 0xa0, 0xcd, 0xe3, 0xd8, 0x8b, 0x05, 0x88, 0x55, 0x29, 0xd6, 0x30, 0xe8,
-	0x06, 0x65, 0xff, 0x56, 0x03, 0x1f, 0x07, 0x41, 0x2c, 0xb7, 0x34, 0x4b, 0xa8, 0xa9, 0x95, 0x63,
-	0x03, 0x0b, 0x2d, 0x03, 0x8a, 0xda, 0xad, 0x56, 0x93, 0xd9, 0x29, 0x38, 0x4d, 0x46, 0x8a, 0xeb,
-	0x88, 0x8b, 0x3c, 0x4a, 0x67, 0xbd, 0x03, 0x8a, 0x33, 0x6a, 0xd0, 0xb3, 0x7a, 0x43, 0x74, 0x75,
-	0x90, 0x75, 0x95, 0x2b, 0x23, 0xea, 0xbc, 0x9f, 0x12, 0x86, 0x96, 0x60, 0x38, 0xda, 0x8f, 0x1a,
-	0xb1, 0x08, 0x37, 0x96, 0x93, 0x96, 0xa6, 0xce, 0x50, 0xb4, 0xac, 0x68, 0xbc, 0x0a, 0x96, 0x75,
-	0xed, 0x6f, 0x67, 0xac, 0x00, 0x4b, 0x86, 0x1b, 0xb7, 0x43, 0x82, 0x76, 0x60, 0xac, 0xc5, 0x56,
-	0x98, 0x08, 0xcc, 0x2e, 0x96, 0xc9, 0x0b, 0x7d, 0xbe, 0xe9, 0xf7, 0xe8, 0x09, 0xaa, 0x64, 0x6e,
-	0xec, 0xb1, 0x54, 0xd3, 0xc9, 0x61, 0x93, 0xba, 0xfd, 0xd5, 0xb3, 0xec, 0x32, 0xa9, 0xf3, 0x87,
-	0xfa, 0xb0, 0x30, 0xac, 0x16, 0xaf, 0x92, 0x99, 0x7c, 0x89, 0x51, 0xf2, 0x45, 0xc2, 0x38, 0x1b,
-	0xcb, 0xba, 0xe8, 0xb3, 0x30, 0x4e, 0x99, 0x7c, 0x2d, 0x31, 0xc5, 0xe9, 0x7c, 0x07, 0xf8, 0x24,
-	0x1f, 0x85, 0x96, 0xb4, 0x41, 0xaf, 0x8c, 0x53, 0xc4, 0xd0, 0x9b, 0xcc, 0x04, 0xc0, 0xcc, 0x79,
-	0xd1, 0x83, 0xb4, 0xae, 0xed, 0x97, 0x64, 0x35, 0x22, 0x79, 0xf9, 0x34, 0xec, 0x87, 0x9b, 0x4f,
-	0x03, 0xdd, 0x84, 0x31, 0x91, 0x11, 0x56, 0x08, 0x3a, 0x8b, 0x86, 0x20, 0x6b, 0x0c, 0xeb, 0xc0,
-	0xc3, 0x74, 0x01, 0x36, 0x2b, 0xa3, 0x4d, 0xb8, 0xa0, 0x25, 0x75, 0xb9, 0x16, 0x3a, 0x4c, 0x1b,
-	0xed, 0xb1, 0x93, 0x48, 0xbb, 0xe6, 0x1e, 0xbb, 0x7f, 0x30, 0x7b, 0x61, 0xad, 0x1b, 0x22, 0xee,
-	0x4e, 0x07, 0xdd, 0x82, 0x33, 0xdc, 0x7d, 0xb3, 0x42, 0x1c, 0xb7, 0xe9, 0xf9, 0xea, 0x1e, 0xe5,
-	0xbb, 0xe5, 0xdc, 0xfd, 0x83, 0xd9, 0x33, 0xf3, 0x59, 0x08, 0x38, 0xbb, 0x1e, 0x7a, 0x0d, 0xca,
-	0xae, 0x1f, 0x89, 0x31, 0x18, 0x32, 0xf2, 0xe6, 0x94, 0x2b, 0xab, 0x75, 0xf5, 0xfd, 0xc9, 0x1f,
-	0x9c, 0x54, 0x40, 0x9b, 0x5c, 0xd8, 0xa9, 0x64, 0x0b, 0xc3, 0x1d, 0x81, 0x67, 0xd2, 0x52, 0x2a,
-	0xc3, 0x81, 0x8b, 0x4b, 0xf9, 0x95, 0x5d, 0xb3, 0xe1, 0xdb, 0x65, 0x10, 0x46, 0x6f, 0x00, 0xa2,
-	0xcc, 0xb7, 0xd7, 0x20, 0xf3, 0x0d, 0x16, 0xf5, 0x9f, 0xc9, 0x86, 0x4b, 0xa6, 0x4b, 0x51, 0xbd,
-	0x03, 0x03, 0x67, 0xd4, 0x42, 0xd7, 0xe9, 0x6d, 0xa0, 0x97, 0x0a, 0xfb, 0x6c, 0x95, 0xe5, 0xac,
-	0x42, 0x5a, 0x21, 0x69, 0x38, 0x31, 0x71, 0x4d, 0x8a, 0x38, 0x55, 0x0f, 0xb9, 0x70, 0xde, 0x69,
-	0xc7, 0x01, 0x93, 0x23, 0x9b, 0xa8, 0x6b, 0xc1, 0x36, 0xf1, 0x99, 0x0a, 0xa7, 0xb4, 0x70, 0x89,
-	0x5e, 0xd4, 0xf3, 0x5d, 0xf0, 0x70, 0x57, 0x2a, 0x94, 0xc1, 0x52, 0x39, 0x4a, 0xc1, 0x8c, 0xa7,
-	0x93, 0x91, 0xa7, 0xf4, 0x45, 0x18, 0xd9, 0x0a, 0xa2, 0x78, 0x95, 0xc4, 0x7b, 0x41, 0xb8, 0x2d,
-	0xa2, 0x22, 0x26, 0x91, 0x74, 0x13, 0x10, 0xd6, 0xf1, 0xe8, 0x0b, 0x8a, 0x19, 0x18, 0x54, 0x2b,
-	0x4c, 0xb7, 0x5b, 0x4a, 0xce, 0x98, 0xeb, 0xbc, 0x18, 0x4b, 0xb8, 0x44, 0xad, 0xd6, 0x16, 0x99,
-	0x9e, 0x36, 0x85, 0x5a, 0xad, 0x2d, 0x62, 0x09, 0xa7, 0xcb, 0x35, 0xda, 0x72, 0x42, 0x52, 0x0b,
-	0x83, 0x06, 0x89, 0xb4, 0xf8, 0xcd, 0x8f, 0xf2, 0x98, 0x8f, 0x74, 0xb9, 0xd6, 0xb3, 0x10, 0x70,
-	0x76, 0x3d, 0x44, 0x3a, 0x13, 0x1a, 0x8d, 0xe7, 0x0b, 0xd8, 0x3b, 0x59, 0x81, 0x3e, 0x73, 0x1a,
-	0xf9, 0x30, 0xa9, 0x52, 0x29, 0xf1, 0x28, 0x8f, 0xd1, 0xf4, 0x04, 0x5b, 0xdb, 0xfd, 0x87, 0x88,
-	0x54, 0x2a, 0x8b, 0x6a, 0x8a, 0x12, 0xee, 0xa0, 0x6d, 0x84, 0x4c, 0x9a, 0xec, 0x99, 0xb4, 0xf6,
-	0x2a, 0x94, 0xa3, 0xf6, 0xba, 0x1b, 0xec, 0x38, 0x9e, 0xcf, 0xf4, 0xb4, 0x1a, 0x2b, 0x5f, 0x97,
-	0x00, 0x9c, 0xe0, 0xa0, 0x65, 0x28, 0x39, 0x52, 0x1f, 0x81, 0xf2, 0x23, 0x6d, 0x28, 0x2d, 0x04,
-	0x77, 0x3e, 0x97, 0x1a, 0x08, 0x55, 0x17, 0xbd, 0x0a, 0x63, 0xc2, 0xfd, 0x50, 0x64, 0xf1, 0x3b,
-	0x65, 0xfa, 0x88, 0xd4, 0x75, 0x20, 0x36, 0x71, 0xd1, 0x6d, 0x18, 0x89, 0x83, 0x26, 0x73, 0x74,
-	0xa0, 0x1c, 0xd2, 0xd9, 0xfc, 0x68, 0x5d, 0x6b, 0x0a, 0x4d, 0x17, 0x05, 0xaa, 0xaa, 0x58, 0xa7,
-	0x83, 0xd6, 0xf8, 0x7a, 0x67, 0x71, 0x8c, 0x49, 0x34, 0xfd, 0x48, 0xfe, 0x9d, 0xa4, 0xc2, 0x1d,
-	0x9b, 0xdb, 0x41, 0xd4, 0xc4, 0x3a, 0x19, 0x74, 0x0d, 0xa6, 0x5a, 0xa1, 0x17, 0xb0, 0x35, 0xa1,
-	0x54, 0x51, 0xd3, 0x66, 0xf6, 0x95, 0x5a, 0x1a, 0x01, 0x77, 0xd6, 0x61, 0xde, 0xa3, 0xa2, 0x70,
-	0xfa, 0x1c, 0xcf, 0xda, 0xcb, 0x5f, 0x46, 0xbc, 0x0c, 0x2b, 0x28, 0x5a, 0x61, 0x27, 0x31, 0x7f,
-	0xd4, 0x4f, 0xcf, 0xe4, 0x07, 0xf7, 0xd0, 0x1f, 0xff, 0x9c, 0xef, 0x53, 0x7f, 0x71, 0x42, 0x01,
-	0xb9, 0x5a, 0x46, 0x38, 0xca, 0x6c, 0x47, 0xd3, 0xe7, 0xbb, 0x58, 0x79, 0xa5, 0x38, 0xf3, 0x84,
-	0x21, 0x30, 0x8a, 0x23, 0x9c, 0xa2, 0x89, 0xbe, 0x05, 0x26, 0x45, 0x30, 0xb1, 0x64, 0x98, 0x2e,
-	0x24, 0xe6, 0xa3, 0x38, 0x05, 0xc3, 0x1d, 0xd8, 0x3c, 0xbe, 0xbb, 0xb3, 0xde, 0x24, 0xe2, 0xe8,
-	0xbb, 0xe9, 0xf9, 0xdb, 0xd1, 0xf4, 0x45, 0x76, 0x3e, 0x88, 0xf8, 0xee, 0x69, 0x28, 0xce, 0xa8,
-	0x81, 0xd6, 0x60, 0xb2, 0x15, 0x12, 0xb2, 0xc3, 0x78, 0x64, 0x71, 0x9f, 0xcd, 0x72, 0xe7, 0x69,
-	0xda, 0x93, 0x5a, 0x0a, 0x76, 0x98, 0x51, 0x86, 0x3b, 0x28, 0xa0, 0x3d, 0x28, 0x05, 0xbb, 0x24,
-	0xdc, 0x22, 0x8e, 0x3b, 0x7d, 0xa9, 0x8b, 0x39, 0xb3, 0xb8, 0xdc, 0x6e, 0x09, 0xdc, 0x94, 0xfa,
-	0x5a, 0x16, 0xf7, 0x56, 0x5f, 0xcb, 0xc6, 0xd0, 0x0f, 0x5a, 0x70, 0x4e, 0x4a, 0xbc, 0xeb, 0x2d,
-	0x3a, 0xea, 0x8b, 0x81, 0x1f, 0xc5, 0x21, 0x77, 0xf7, 0x7d, 0x2c, 0xdf, 0x05, 0x76, 0x2d, 0xa7,
-	0x92, 0x92, 0x2b, 0x9e, 0xcb, 0xc3, 0x88, 0x70, 0x7e, 0x8b, 0x33, 0xdf, 0x0c, 0x53, 0x1d, 0x37,
-	0xf7, 0x51, 0x52, 0x4e, 0xcc, 0x6c, 0xc3, 0x98, 0x31, 0x3a, 0x0f, 0x55, 0x73, 0xf9, 0x2f, 0x87,
-	0xa1, 0xac, 0xb4, 0x5a, 0xe8, 0xaa, 0xa9, 0xac, 0x3c, 0x97, 0x56, 0x56, 0x96, 0xe8, 0x6b, 0x56,
-	0xd7, 0x4f, 0xae, 0x65, 0x04, 0x57, 0xca, 0xdb, 0x8b, 0xfd, 0x7b, 0xcd, 0x6a, 0x42, 0xca, 0x62,
-	0xdf, 0x5a, 0xcf, 0x81, 0xae, 0x72, 0xcf, 0x6b, 0x30, 0xe5, 0x07, 0x8c, 0x5d, 0x24, 0xae, 0xe4,
-	0x05, 0xd8, 0x95, 0x5f, 0xd6, 0xa3, 0x15, 0xa4, 0x10, 0x70, 0x67, 0x1d, 0xda, 0x20, 0xbf, 0xb3,
-	0xd3, 0x82, 0x56, 0x7e, 0xa5, 0x63, 0x01, 0x45, 0x8f, 0xc3, 0x60, 0x2b, 0x70, 0xab, 0x35, 0xc1,
-	0x2a, 0x6a, 0xe9, 0x47, 0xdd, 0x6a, 0x0d, 0x73, 0x18, 0x9a, 0x87, 0x21, 0xf6, 0x23, 0x9a, 0x1e,
-	0xcd, 0x77, 0x4b, 0x67, 0x35, 0xb4, 0x84, 0x1e, 0xac, 0x02, 0x16, 0x15, 0x99, 0xc0, 0x87, 0xf2,
-	0xd7, 0x4c, 0xe0, 0x33, 0xfc, 0x80, 0x02, 0x1f, 0x49, 0x00, 0x27, 0xb4, 0xd0, 0x3d, 0x38, 0x63,
-	0xbc, 0x69, 0xf8, 0x12, 0x21, 0x91, 0x70, 0x8d, 0x7d, 0xbc, 0xeb, 0x63, 0x46, 0x68, 0x49, 0x2f,
-	0x88, 0x4e, 0x9f, 0xa9, 0x66, 0x51, 0xc2, 0xd9, 0x0d, 0xa0, 0x26, 0x4c, 0x35, 0x3a, 0x5a, 0x2d,
-	0xf5, 0xdf, 0xaa, 0x9a, 0xd0, 0xce, 0x16, 0x3b, 0x09, 0xa3, 0x57, 0xa1, 0xf4, 0x6e, 0x10, 0xb1,
-	0x63, 0x56, 0xb0, 0xb7, 0xd2, 0xaf, 0xb2, 0xf4, 0xe6, 0xad, 0x3a, 0x2b, 0x3f, 0x3c, 0x98, 0x1d,
-	0xa9, 0x05, 0xae, 0xfc, 0x8b, 0x55, 0x05, 0xf4, 0xbd, 0x16, 0xcc, 0x74, 0x3e, 0x9a, 0x54, 0xa7,
-	0xc7, 0xfa, 0xef, 0xb4, 0x2d, 0x1a, 0x9d, 0x59, 0xca, 0x25, 0x87, 0xbb, 0x34, 0x65, 0x7f, 0x99,
-	0x6b, 0x34, 0x85, 0xde, 0x83, 0x44, 0xed, 0xe6, 0x49, 0x24, 0x40, 0x5c, 0x32, 0x54, 0x32, 0x0f,
-	0xac, 0x35, 0xff, 0x35, 0x8b, 0x69, 0xcd, 0xd7, 0xc8, 0x4e, 0xab, 0xe9, 0xc4, 0x27, 0xe1, 0x96,
-	0xf7, 0x26, 0x94, 0x62, 0xd1, 0x5a, 0xb7, 0x9c, 0x8d, 0x5a, 0xa7, 0x98, 0xe5, 0x80, 0x62, 0x36,
-	0x65, 0x29, 0x56, 0x64, 0xec, 0x7f, 0xc8, 0x67, 0x40, 0x42, 0x4e, 0x40, 0xf2, 0x5d, 0x31, 0x25,
-	0xdf, 0xb3, 0x3d, 0xbe, 0x20, 0x47, 0x02, 0xfe, 0x0f, 0xcc, 0x7e, 0x33, 0x21, 0xcb, 0x07, 0xdd,
-	0x5c, 0xc3, 0xfe, 0x61, 0x0b, 0x4e, 0x67, 0xd9, 0x37, 0xd2, 0x07, 0x02, 0x17, 0xf1, 0x28, 0xf3,
-	0x15, 0x35, 0x82, 0x77, 0x44, 0x39, 0x56, 0x18, 0x7d, 0xa7, 0x43, 0x3a, 0x5a, 0x78, 0xd0, 0x5b,
-	0x30, 0x56, 0x0b, 0x89, 0x76, 0xa1, 0xbd, 0xce, 0xfd, 0x6c, 0x79, 0x7f, 0x9e, 0x3e, 0xb2, 0x8f,
-	0xad, 0xfd, 0x33, 0x05, 0x38, 0xcd, 0xf5, 0xcf, 0xf3, 0xbb, 0x81, 0xe7, 0xd6, 0x02, 0x57, 0xa4,
-	0xb2, 0x7a, 0x0b, 0x46, 0x5b, 0x9a, 0x5c, 0xae, 0x5b, 0xa8, 0x3b, 0x5d, 0x7e, 0x97, 0x48, 0x12,
-	0xf4, 0x52, 0x6c, 0xd0, 0x42, 0x2e, 0x8c, 0x92, 0x5d, 0xaf, 0xa1, 0x94, 0x98, 0x85, 0x23, 0x5f,
-	0x2e, 0xaa, 0x95, 0x25, 0x8d, 0x0e, 0x36, 0xa8, 0x3e, 0x84, 0xec, 0xa6, 0xf6, 0x8f, 0x58, 0xf0,
-	0x48, 0x4e, 0x60, 0x3c, 0xda, 0xdc, 0x1e, 0xd3, 0xf4, 0x8b, 0x44, 0x89, 0xaa, 0x39, 0xae, 0xff,
-	0xc7, 0x02, 0x8a, 0x3e, 0x0d, 0xc0, 0xf5, 0xf7, 0xf4, 0x85, 0xda, 0x2b, 0x82, 0x98, 0x11, 0xfc,
-	0x48, 0x8b, 0x63, 0x23, 0xeb, 0x63, 0x8d, 0x96, 0xfd, 0x93, 0x45, 0x18, 0xe4, 0x29, 0x9e, 0x97,
-	0x61, 0x78, 0x8b, 0x07, 0xf8, 0xef, 0x27, 0x97, 0x40, 0x22, 0x3b, 0xe0, 0x05, 0x58, 0x56, 0x46,
-	0x2b, 0x70, 0x8a, 0x27, 0x48, 0x68, 0x56, 0x48, 0xd3, 0xd9, 0x97, 0x82, 0x2e, 0x9e, 0x5c, 0x50,
-	0x09, 0xfc, 0xaa, 0x9d, 0x28, 0x38, 0xab, 0x1e, 0x7a, 0x1d, 0xc6, 0xe9, 0xc3, 0x23, 0x68, 0xc7,
-	0x92, 0x12, 0x4f, 0x8d, 0xa0, 0x5e, 0x3a, 0x6b, 0x06, 0x14, 0xa7, 0xb0, 0xe9, 0xdb, 0xb7, 0xd5,
-	0x21, 0xd2, 0x1b, 0x4c, 0xde, 0xbe, 0xa6, 0x18, 0xcf, 0xc4, 0x65, 0x86, 0x8d, 0x6d, 0x66, 0xc6,
-	0xb9, 0xb6, 0x15, 0x92, 0x68, 0x2b, 0x68, 0xba, 0x8c, 0xd1, 0x1a, 0xd4, 0x0c, 0x1b, 0x53, 0x70,
-	0xdc, 0x51, 0x83, 0x52, 0xd9, 0x70, 0xbc, 0x66, 0x3b, 0x24, 0x09, 0x95, 0x21, 0x93, 0xca, 0x72,
-	0x0a, 0x8e, 0x3b, 0x6a, 0xd0, 0x75, 0x74, 0xa6, 0x16, 0x06, 0xf4, 0xf0, 0x92, 0xd1, 0x3e, 0x94,
-	0xb5, 0xea, 0xb0, 0x74, 0x4c, 0xec, 0x12, 0x17, 0x4b, 0xd8, 0xf3, 0x71, 0x0a, 0x86, 0xaa, 0xba,
-	0x2e, 0x5c, 0x12, 0x25, 0x15, 0xf4, 0x2c, 0x8c, 0x88, 0xb0, 0xf7, 0xcc, 0xa8, 0x92, 0x4f, 0x1d,
-	0x53, 0xad, 0x57, 0x92, 0x62, 0xac, 0xe3, 0xd8, 0xdf, 0x57, 0x80, 0x53, 0x19, 0x56, 0xf1, 0xfc,
-	0xa8, 0xda, 0xf4, 0xa2, 0x58, 0x25, 0x50, 0xd3, 0x8e, 0x2a, 0x5e, 0x8e, 0x15, 0x06, 0xdd, 0x0f,
-	0xfc, 0x30, 0x4c, 0x1f, 0x80, 0xc2, 0xea, 0x54, 0x40, 0x8f, 0x98, 0x8a, 0xec, 0x12, 0x0c, 0xb4,
-	0x23, 0x22, 0x23, 0xda, 0xa9, 0xf3, 0x9b, 0x69, 0x5c, 0x18, 0x84, 0xb2, 0xc7, 0x9b, 0x4a, 0x79,
-	0xa1, 0xb1, 0xc7, 0x5c, 0x7d, 0xc1, 0x61, 0xb4, 0x73, 0x31, 0xf1, 0x1d, 0x3f, 0x16, 0x4c, 0x74,
-	0x12, 0x9a, 0x89, 0x95, 0x62, 0x01, 0xb5, 0xbf, 0x54, 0x84, 0x73, 0xb9, 0x7e, 0x32, 0xb4, 0xeb,
-	0x3b, 0x81, 0xef, 0xc5, 0x81, 0xb2, 0x59, 0xe0, 0xe1, 0x98, 0x48, 0x6b, 0x6b, 0x45, 0x94, 0x63,
-	0x85, 0x81, 0x2e, 0xc3, 0x20, 0x13, 0x3a, 0x75, 0xa4, 0x92, 0x5b, 0xa8, 0xf0, 0xf8, 0x1c, 0x1c,
-	0xdc, 0x77, 0x9a, 0xce, 0xc7, 0x61, 0xa0, 0x15, 0x04, 0xcd, 0xf4, 0xa1, 0x45, 0xbb, 0x1b, 0x04,
-	0x4d, 0xcc, 0x80, 0xe8, 0x63, 0x62, 0xbc, 0x52, 0x4a, 0x7a, 0xec, 0xb8, 0x41, 0xa4, 0x0d, 0xda,
-	0x93, 0x30, 0xbc, 0x4d, 0xf6, 0x43, 0xcf, 0xdf, 0x4c, 0x1b, 0x6f, 0xdc, 0xe0, 0xc5, 0x58, 0xc2,
-	0xcd, 0xac, 0x40, 0xc3, 0xc7, 0x9d, 0x5f, 0xb3, 0xd4, 0xf3, 0x0a, 0xfc, 0xfe, 0x22, 0x4c, 0xe0,
-	0x85, 0xca, 0x87, 0x13, 0x71, 0xbb, 0x73, 0x22, 0x8e, 0x3b, 0xbf, 0x66, 0xef, 0xd9, 0xf8, 0x45,
-	0x0b, 0x26, 0x58, 0xf0, 0x7d, 0x11, 0xc8, 0xc7, 0x0b, 0xfc, 0x13, 0x60, 0xf1, 0x1e, 0x87, 0xc1,
-	0x90, 0x36, 0x9a, 0xce, 0x21, 0xc7, 0x7a, 0x82, 0x39, 0x0c, 0x9d, 0x87, 0x01, 0xd6, 0x05, 0x3a,
-	0x79, 0xa3, 0x3c, 0xfd, 0x4e, 0xc5, 0x89, 0x1d, 0xcc, 0x4a, 0x59, 0x74, 0x0a, 0x4c, 0x5a, 0x4d,
-	0x8f, 0x77, 0x3a, 0x51, 0x09, 0x7e, 0x30, 0xa2, 0x53, 0x64, 0x76, 0xed, 0xfd, 0x45, 0xa7, 0xc8,
-	0x26, 0xd9, 0xfd, 0xf9, 0xf4, 0x87, 0x05, 0xb8, 0x98, 0x59, 0xaf, 0xef, 0xe8, 0x14, 0xdd, 0x6b,
-	0x3f, 0xcc, 0x20, 0xed, 0xc5, 0x13, 0x34, 0x8d, 0x1b, 0xe8, 0x97, 0xc3, 0x1c, 0xec, 0x23, 0x68,
-	0x44, 0xe6, 0x90, 0x7d, 0x40, 0x82, 0x46, 0x64, 0xf6, 0x2d, 0xe7, 0xf9, 0xf7, 0xe7, 0x85, 0x9c,
-	0x6f, 0x61, 0x0f, 0xc1, 0x2b, 0xf4, 0x9c, 0x61, 0xc0, 0x48, 0x70, 0xcc, 0xa3, 0xfc, 0x8c, 0xe1,
-	0x65, 0x58, 0x41, 0xd1, 0x3c, 0x4c, 0xec, 0x78, 0x3e, 0x3d, 0x7c, 0xf6, 0x4d, 0xc6, 0x4f, 0xc5,
-	0xf4, 0x59, 0x31, 0xc1, 0x38, 0x8d, 0x8f, 0x3c, 0x2d, 0xa0, 0x44, 0x21, 0x3f, 0x2b, 0x73, 0x6e,
-	0x6f, 0xe7, 0x4c, 0x75, 0xa9, 0x1a, 0xc5, 0x8c, 0xe0, 0x12, 0x2b, 0xda, 0xfb, 0xbf, 0xd8, 0xff,
-	0xfb, 0x7f, 0x34, 0xfb, 0xed, 0x3f, 0xf3, 0x2a, 0x8c, 0x3d, 0xb0, 0xc0, 0xd7, 0xfe, 0x6a, 0x11,
-	0x1e, 0xed, 0xb2, 0xed, 0xf9, 0x59, 0x6f, 0xcc, 0x81, 0x76, 0xd6, 0x77, 0xcc, 0x43, 0x0d, 0x4e,
-	0x6f, 0xb4, 0x9b, 0xcd, 0x7d, 0x66, 0x7d, 0x4e, 0x5c, 0x89, 0x21, 0x78, 0xca, 0xf3, 0x32, 0xe1,
-	0xd1, 0x72, 0x06, 0x0e, 0xce, 0xac, 0x49, 0x19, 0x7a, 0x7a, 0x93, 0xec, 0x2b, 0x52, 0x29, 0x86,
-	0x1e, 0xeb, 0x40, 0x6c, 0xe2, 0xa2, 0x6b, 0x30, 0xe5, 0xec, 0x3a, 0x1e, 0x8f, 0xca, 0x29, 0x09,
-	0x70, 0x8e, 0x5e, 0xc9, 0xe9, 0xe6, 0xd3, 0x08, 0xb8, 0xb3, 0x0e, 0x7a, 0x03, 0x50, 0x20, 0xb2,
-	0xca, 0x5f, 0x23, 0xbe, 0xd0, 0x6a, 0xb1, 0xb9, 0x2b, 0x26, 0x47, 0xc2, 0xad, 0x0e, 0x0c, 0x9c,
-	0x51, 0x2b, 0x15, 0xa0, 0x61, 0x28, 0x3f, 0x40, 0x43, 0xf7, 0x73, 0xb1, 0x67, 0x7e, 0x80, 0xff,
-	0x64, 0xd1, 0xeb, 0x8b, 0x33, 0xf9, 0x66, 0x9c, 0xb1, 0x57, 0x99, 0x41, 0x17, 0x97, 0xe1, 0x69,
-	0xb1, 0x12, 0xce, 0x68, 0x06, 0x5d, 0x09, 0x10, 0x9b, 0xb8, 0x7c, 0x41, 0x44, 0x89, 0x8b, 0x9e,
-	0xc1, 0xe2, 0x8b, 0x60, 0x28, 0x0a, 0x03, 0x7d, 0x06, 0x86, 0x5d, 0x6f, 0xd7, 0x8b, 0x82, 0x50,
-	0xac, 0xf4, 0x23, 0xaa, 0x0b, 0x92, 0x73, 0xb0, 0xc2, 0xc9, 0x60, 0x49, 0xcf, 0xfe, 0xfe, 0x02,
-	0x8c, 0xc9, 0x16, 0xdf, 0x6c, 0x07, 0xb1, 0x73, 0x02, 0xd7, 0xf2, 0x35, 0xe3, 0x5a, 0xfe, 0x58,
-	0xb7, 0x88, 0x30, 0xac, 0x4b, 0xb9, 0xd7, 0xf1, 0xad, 0xd4, 0x75, 0xfc, 0x44, 0x6f, 0x52, 0xdd,
-	0xaf, 0xe1, 0x7f, 0x64, 0xc1, 0x94, 0x81, 0x7f, 0x02, 0xb7, 0xc1, 0xb2, 0x79, 0x1b, 0x3c, 0xd6,
-	0xf3, 0x1b, 0x72, 0x6e, 0x81, 0xef, 0x2e, 0xa6, 0xfa, 0xce, 0x4e, 0xff, 0x77, 0x61, 0x60, 0xcb,
-	0x09, 0xdd, 0x6e, 0x11, 0xb0, 0x3b, 0x2a, 0xcd, 0x5d, 0x77, 0x42, 0xa1, 0xd6, 0x7b, 0x5a, 0x25,
-	0x45, 0x76, 0xc2, 0xde, 0x2a, 0x3d, 0xd6, 0x14, 0x7a, 0x19, 0x86, 0xa2, 0x46, 0xd0, 0x52, 0xf6,
-	0xe2, 0x97, 0x78, 0xc2, 0x64, 0x5a, 0x72, 0x78, 0x30, 0x8b, 0xcc, 0xe6, 0x68, 0x31, 0x16, 0xf8,
-	0xe8, 0x2d, 0x18, 0x63, 0xbf, 0x94, 0x8d, 0x4d, 0x31, 0x3f, 0x5b, 0x4e, 0x5d, 0x47, 0xe4, 0x06,
-	0x68, 0x46, 0x11, 0x36, 0x49, 0xcd, 0x6c, 0x42, 0x59, 0x7d, 0xd6, 0x43, 0xd5, 0xc7, 0xfd, 0xdb,
-	0x22, 0x9c, 0xca, 0x58, 0x73, 0x28, 0x32, 0x66, 0xe2, 0xd9, 0x3e, 0x97, 0xea, 0xfb, 0x9c, 0x8b,
-	0x88, 0xbd, 0x86, 0x5c, 0xb1, 0xb6, 0xfa, 0x6e, 0xf4, 0x76, 0x44, 0xd2, 0x8d, 0xd2, 0xa2, 0xde,
-	0x8d, 0xd2, 0xc6, 0x4e, 0x6c, 0xa8, 0x69, 0x43, 0xaa, 0xa7, 0x0f, 0x75, 0x4e, 0xff, 0xa4, 0x08,
-	0xa7, 0xb3, 0x82, 0x54, 0xa1, 0x6f, 0x4b, 0x65, 0x4e, 0x7b, 0xa1, 0xdf, 0xf0, 0x56, 0x3c, 0x9d,
-	0x1a, 0x97, 0x01, 0x2f, 0xcc, 0x99, 0xb9, 0xd4, 0x7a, 0x0e, 0xb3, 0x68, 0x93, 0xb9, 0x9f, 0x87,
-	0x3c, 0xe3, 0x9d, 0x3c, 0x3e, 0x3e, 0xd9, 0x77, 0x07, 0x44, 0xaa, 0xbc, 0x28, 0xa5, 0xbf, 0x97,
-	0xc5, 0xbd, 0xf5, 0xf7, 0xb2, 0xe5, 0x19, 0x0f, 0x46, 0xb4, 0xaf, 0x79, 0xa8, 0x33, 0xbe, 0x4d,
-	0x6f, 0x2b, 0xad, 0xdf, 0x0f, 0x75, 0xd6, 0x7f, 0xc4, 0x82, 0x94, 0x35, 0xb4, 0x12, 0x8b, 0x59,
-	0xb9, 0x62, 0xb1, 0x4b, 0x30, 0x10, 0x06, 0x4d, 0x92, 0x4e, 0x54, 0x86, 0x83, 0x26, 0xc1, 0x0c,
-	0x42, 0x31, 0xe2, 0x44, 0xd8, 0x31, 0xaa, 0x3f, 0xe4, 0xc4, 0x13, 0xed, 0x71, 0x18, 0x6c, 0x92,
-	0x5d, 0xd2, 0x4c, 0xe7, 0x93, 0xb8, 0x49, 0x0b, 0x31, 0x87, 0xd9, 0xbf, 0x38, 0x00, 0x17, 0xba,
-	0x06, 0x70, 0xa0, 0xcf, 0xa1, 0x4d, 0x27, 0x26, 0x7b, 0xce, 0x7e, 0x3a, 0xf0, 0xfb, 0x35, 0x5e,
-	0x8c, 0x25, 0x9c, 0xf9, 0xab, 0xf0, 0xf8, 0xad, 0x29, 0x21, 0xa2, 0x08, 0xdb, 0x2a, 0xa0, 0xa6,
-	0x50, 0xaa, 0x78, 0x1c, 0x42, 0xa9, 0xe7, 0x00, 0xa2, 0xa8, 0xc9, 0x0d, 0x5f, 0x5c, 0xe1, 0x08,
-	0x93, 0xc4, 0xf9, 0xad, 0xdf, 0x14, 0x10, 0xac, 0x61, 0xa1, 0x0a, 0x4c, 0xb6, 0xc2, 0x20, 0xe6,
-	0x32, 0xd9, 0x0a, 0xb7, 0x0d, 0x1b, 0x34, 0x7d, 0xe7, 0x6b, 0x29, 0x38, 0xee, 0xa8, 0x81, 0x5e,
-	0x84, 0x11, 0xe1, 0x4f, 0x5f, 0x0b, 0x82, 0xa6, 0x10, 0x03, 0x29, 0x73, 0xa9, 0x7a, 0x02, 0xc2,
-	0x3a, 0x9e, 0x56, 0x8d, 0x09, 0x7a, 0x87, 0x33, 0xab, 0x71, 0x61, 0xaf, 0x86, 0x97, 0x0a, 0x58,
-	0x57, 0xea, 0x2b, 0x60, 0x5d, 0x22, 0x18, 0x2b, 0xf7, 0xad, 0xdb, 0x82, 0x9e, 0xa2, 0xa4, 0x9f,
-	0x1b, 0x80, 0x53, 0x62, 0xe1, 0x3c, 0xec, 0xe5, 0x72, 0xbb, 0x73, 0xb9, 0x1c, 0x87, 0xe8, 0xec,
-	0xc3, 0x35, 0x73, 0xd2, 0x6b, 0xe6, 0x07, 0x2c, 0x30, 0xd9, 0x2b, 0xf4, 0x7f, 0xe7, 0x66, 0xce,
-	0x78, 0x31, 0x97, 0x5d, 0x73, 0xe5, 0x05, 0xf2, 0x3e, 0x73, 0x68, 0xd8, 0xff, 0xc1, 0x82, 0xc7,
-	0x7a, 0x52, 0x44, 0x4b, 0x50, 0x66, 0x3c, 0xa0, 0xf6, 0x3a, 0x7b, 0x42, 0xd9, 0x8e, 0x4a, 0x40,
-	0x0e, 0x4b, 0x9a, 0xd4, 0x44, 0x4b, 0x1d, 0x29, 0x4a, 0x9e, 0xcc, 0x48, 0x51, 0x72, 0xc6, 0x18,
-	0x9e, 0x07, 0xcc, 0x51, 0xf2, 0xe5, 0x22, 0x0c, 0xf1, 0x15, 0x7f, 0x02, 0xcf, 0xb0, 0x65, 0x21,
-	0xb7, 0xed, 0x12, 0x11, 0x8f, 0xf7, 0x65, 0xae, 0xe2, 0xc4, 0x0e, 0x67, 0x13, 0xd4, 0x6d, 0x95,
-	0x48, 0x78, 0xd1, 0xe7, 0x00, 0xa2, 0x38, 0xf4, 0xfc, 0x4d, 0x5a, 0x26, 0x62, 0x25, 0x7e, 0xbc,
-	0x0b, 0xb5, 0xba, 0x42, 0xe6, 0x34, 0x93, 0x9d, 0xab, 0x00, 0x58, 0xa3, 0x88, 0xe6, 0x8c, 0xfb,
-	0x72, 0x26, 0x25, 0xf8, 0x04, 0x4e, 0x35, 0xb9, 0x3d, 0x67, 0x5e, 0x82, 0xb2, 0x22, 0xde, 0x4b,
-	0x8a, 0x33, 0xaa, 0x33, 0x17, 0x9f, 0x82, 0x89, 0x54, 0xdf, 0x8e, 0x24, 0x04, 0xfa, 0x25, 0x0b,
-	0x26, 0x78, 0x67, 0x96, 0xfc, 0x5d, 0x71, 0xa6, 0xbe, 0x07, 0xa7, 0x9b, 0x19, 0x67, 0x9b, 0x98,
-	0xd1, 0xfe, 0xcf, 0x42, 0x25, 0xf4, 0xc9, 0x82, 0xe2, 0xcc, 0x36, 0xd0, 0x15, 0xba, 0x6e, 0xe9,
-	0xd9, 0xe5, 0x34, 0x85, 0x5b, 0xe3, 0x28, 0x5f, 0xb3, 0xbc, 0x0c, 0x2b, 0xa8, 0xfd, 0xdb, 0x16,
-	0x4c, 0xf1, 0x9e, 0xdf, 0x20, 0xfb, 0x6a, 0x87, 0x7f, 0x3d, 0xfb, 0x2e, 0xb2, 0x06, 0x15, 0x72,
-	0xb2, 0x06, 0xe9, 0x9f, 0x56, 0xec, 0xfa, 0x69, 0x3f, 0x63, 0x81, 0x58, 0x21, 0x27, 0xf0, 0x94,
-	0xff, 0x66, 0xf3, 0x29, 0x3f, 0x93, 0xbf, 0x09, 0x72, 0xde, 0xf0, 0x7f, 0x66, 0xc1, 0x24, 0x47,
-	0x48, 0x74, 0xce, 0x5f, 0xd7, 0x79, 0xe8, 0x27, 0xb7, 0xe8, 0x0d, 0xb2, 0xbf, 0x16, 0xd4, 0x9c,
-	0x78, 0x2b, 0xfb, 0xa3, 0x8c, 0xc9, 0x1a, 0xe8, 0x3a, 0x59, 0xae, 0xdc, 0x40, 0x47, 0x48, 0x58,
-	0x7c, 0xe4, 0xa0, 0xfa, 0xf6, 0xd7, 0x2c, 0x40, 0xbc, 0x19, 0x83, 0xfd, 0xa1, 0x4c, 0x05, 0x2b,
-	0xd5, 0xae, 0x8b, 0xe4, 0x68, 0x52, 0x10, 0xac, 0x61, 0x1d, 0xcb, 0xf0, 0xa4, 0x0c, 0x07, 0x8a,
-	0xbd, 0x0d, 0x07, 0x8e, 0x30, 0xa2, 0x7f, 0x30, 0x08, 0x69, 0x0f, 0x10, 0x74, 0x07, 0x46, 0x1b,
-	0x4e, 0xcb, 0x59, 0xf7, 0x9a, 0x5e, 0xec, 0x91, 0xa8, 0x9b, 0xc5, 0xd1, 0xa2, 0x86, 0x27, 0x54,
-	0xbd, 0x5a, 0x09, 0x36, 0xe8, 0xa0, 0x39, 0x80, 0x56, 0xe8, 0xed, 0x7a, 0x4d, 0xb2, 0xc9, 0x24,
-	0x0e, 0xcc, 0x91, 0x9a, 0x9b, 0xd1, 0xc8, 0x52, 0xac, 0x61, 0x64, 0x78, 0xaa, 0x16, 0x1f, 0xb2,
-	0xa7, 0x2a, 0x9c, 0x98, 0xa7, 0xea, 0xc0, 0x91, 0x3c, 0x55, 0x4b, 0x47, 0xf6, 0x54, 0x1d, 0xec,
-	0xcb, 0x53, 0x15, 0xc3, 0x59, 0xc9, 0xc1, 0xd1, 0xff, 0xcb, 0x5e, 0x93, 0x08, 0xb6, 0x9d, 0x7b,
-	0x7f, 0xcf, 0xdc, 0x3f, 0x98, 0x3d, 0x8b, 0x33, 0x31, 0x70, 0x4e, 0x4d, 0xf4, 0x69, 0x98, 0x76,
-	0x9a, 0xcd, 0x60, 0x4f, 0x4d, 0xea, 0x52, 0xd4, 0x70, 0x9a, 0x5c, 0x94, 0x3f, 0xcc, 0xa8, 0x9e,
-	0xbf, 0x7f, 0x30, 0x3b, 0x3d, 0x9f, 0x83, 0x83, 0x73, 0x6b, 0xa3, 0xd7, 0xa0, 0xdc, 0x0a, 0x83,
-	0xc6, 0x8a, 0xe6, 0xa6, 0x76, 0x91, 0x0e, 0x60, 0x4d, 0x16, 0x1e, 0x1e, 0xcc, 0x8e, 0xa9, 0x3f,
-	0xec, 0xc2, 0x4f, 0x2a, 0xd8, 0xdb, 0x70, 0xaa, 0x4e, 0x42, 0x8f, 0xa5, 0x1f, 0x76, 0x93, 0xf3,
-	0x63, 0x0d, 0xca, 0x61, 0xea, 0xc4, 0xec, 0x2b, 0x8a, 0x9c, 0x16, 0x7d, 0x5c, 0x9e, 0x90, 0x09,
-	0x21, 0xfb, 0x7f, 0x5a, 0x30, 0x2c, 0x3c, 0x32, 0x4e, 0x80, 0x51, 0x9b, 0x37, 0xe4, 0xe5, 0xb3,
-	0xd9, 0xb7, 0x0a, 0xeb, 0x4c, 0xae, 0xa4, 0xbc, 0x9a, 0x92, 0x94, 0x3f, 0xd6, 0x8d, 0x48, 0x77,
-	0x19, 0xf9, 0x5f, 0x2b, 0xc2, 0xb8, 0xe9, 0xba, 0x77, 0x02, 0x43, 0xb0, 0x0a, 0xc3, 0x91, 0xf0,
-	0x4d, 0x2b, 0xe4, 0x5b, 0x64, 0xa7, 0x27, 0x31, 0xb1, 0xd6, 0x12, 0xde, 0x68, 0x92, 0x48, 0xa6,
-	0xd3, 0x5b, 0xf1, 0x21, 0x3a, 0xbd, 0xf5, 0xf2, 0x9e, 0x1c, 0x38, 0x0e, 0xef, 0x49, 0xfb, 0x2b,
-	0xec, 0x66, 0xd3, 0xcb, 0x4f, 0x80, 0xe9, 0xb9, 0x66, 0xde, 0x81, 0x76, 0x97, 0x95, 0x25, 0x3a,
-	0x95, 0xc3, 0xfc, 0xfc, 0x82, 0x05, 0x17, 0x32, 0xbe, 0x4a, 0xe3, 0x84, 0x9e, 0x86, 0x92, 0xd3,
-	0x76, 0x3d, 0xb5, 0x97, 0x35, 0xad, 0xd9, 0xbc, 0x28, 0xc7, 0x0a, 0x03, 0x2d, 0xc2, 0x14, 0xb9,
-	0xd7, 0xf2, 0xb8, 0xc2, 0x50, 0x37, 0xa9, 0x2c, 0xf2, 0xc8, 0xda, 0x4b, 0x69, 0x20, 0xee, 0xc4,
-	0x57, 0xc1, 0x1e, 0x8a, 0xb9, 0xc1, 0x1e, 0xfe, 0xae, 0x05, 0x23, 0xca, 0x3b, 0xeb, 0xa1, 0x8f,
-	0xf6, 0xb7, 0x98, 0xa3, 0xfd, 0x68, 0x97, 0xd1, 0xce, 0x19, 0xe6, 0xbf, 0x51, 0x50, 0xfd, 0xad,
-	0x05, 0x61, 0xdc, 0x07, 0x87, 0xf5, 0x32, 0x94, 0x5a, 0x61, 0x10, 0x07, 0x8d, 0xa0, 0x29, 0x18,
-	0xac, 0xf3, 0x49, 0xd4, 0x13, 0x5e, 0x7e, 0xa8, 0xfd, 0xc6, 0x0a, 0x9b, 0x8d, 0x5e, 0x10, 0xc6,
-	0x82, 0xa9, 0x49, 0x46, 0x2f, 0x08, 0x63, 0xcc, 0x20, 0xc8, 0x05, 0x88, 0x9d, 0x70, 0x93, 0xc4,
-	0xb4, 0x4c, 0x44, 0x59, 0xca, 0x3f, 0x3c, 0xda, 0xb1, 0xd7, 0x9c, 0xf3, 0xfc, 0x38, 0x8a, 0xc3,
-	0xb9, 0xaa, 0x1f, 0xdf, 0x0a, 0xf9, 0x7b, 0x4d, 0x0b, 0x63, 0xa2, 0x68, 0x61, 0x8d, 0xae, 0x74,
-	0x2b, 0x66, 0x6d, 0x0c, 0x9a, 0xfa, 0xf7, 0x55, 0x51, 0x8e, 0x15, 0x86, 0xfd, 0x12, 0xbb, 0x4a,
-	0xd8, 0x00, 0x1d, 0x2d, 0xee, 0xc7, 0x97, 0xcb, 0x6a, 0x68, 0x99, 0xf2, 0xad, 0xa2, 0x47, 0x17,
-	0xe9, 0x7e, 0x72, 0xd3, 0x86, 0x75, 0x17, 0xa3, 0x24, 0x04, 0x09, 0xfa, 0xd6, 0x0e, 0x9b, 0x8a,
-	0x67, 0x7a, 0x5c, 0x01, 0x47, 0xb0, 0xa2, 0x60, 0xd1, 0xfe, 0x59, 0x2c, 0xf4, 0x6a, 0x4d, 0x2c,
-	0x72, 0x2d, 0xda, 0xbf, 0x00, 0xe0, 0x04, 0x07, 0x5d, 0x15, 0xaf, 0x71, 0x2e, 0x9a, 0x7e, 0x34,
-	0xf5, 0x1a, 0x97, 0x9f, 0xaf, 0x09, 0xb3, 0x9f, 0x85, 0x11, 0x95, 0xeb, 0xb2, 0xc6, 0x53, 0x28,
-	0x8a, 0x98, 0x53, 0x4b, 0x49, 0x31, 0xd6, 0x71, 0xd0, 0x1a, 0x4c, 0x44, 0x5c, 0xd4, 0xa3, 0x42,
-	0x8b, 0x72, 0x91, 0xd9, 0xc7, 0xa5, 0x21, 0x4a, 0xdd, 0x04, 0x1f, 0xb2, 0x22, 0x7e, 0x74, 0x48,
-	0x57, 0xde, 0x34, 0x09, 0xf4, 0x3a, 0x8c, 0x37, 0x03, 0xc7, 0x5d, 0x70, 0x9a, 0x8e, 0xdf, 0x60,
-	0xdf, 0x5b, 0x32, 0x53, 0xa6, 0xdd, 0x34, 0xa0, 0x38, 0x85, 0x4d, 0x39, 0x1f, 0xbd, 0x44, 0x84,
-	0xc3, 0x75, 0xfc, 0x4d, 0x12, 0x89, 0xcc, 0x85, 0x8c, 0xf3, 0xb9, 0x99, 0x83, 0x83, 0x73, 0x6b,
-	0xa3, 0x97, 0x61, 0x54, 0x7e, 0xbe, 0xe6, 0xf9, 0x9e, 0xd8, 0xde, 0x6b, 0x30, 0x6c, 0x60, 0xa2,
-	0x3d, 0x38, 0x23, 0xff, 0xaf, 0x85, 0xce, 0xc6, 0x86, 0xd7, 0x10, 0xee, 0xa0, 0xdc, 0x31, 0x6e,
-	0x5e, 0x7a, 0x6f, 0x2d, 0x65, 0x21, 0x1d, 0x1e, 0xcc, 0x5e, 0x12, 0xa3, 0x96, 0x09, 0x67, 0x93,
-	0x98, 0x4d, 0x1f, 0xad, 0xc0, 0xa9, 0x2d, 0xe2, 0x34, 0xe3, 0xad, 0xc5, 0x2d, 0xd2, 0xd8, 0x96,
-	0x9b, 0x88, 0xf9, 0xd3, 0x6b, 0x16, 0xeb, 0xd7, 0x3b, 0x51, 0x70, 0x56, 0x3d, 0xf4, 0x36, 0x4c,
-	0xb7, 0xda, 0xeb, 0x4d, 0x2f, 0xda, 0x5a, 0x0d, 0x62, 0x66, 0x8d, 0xa2, 0x52, 0x67, 0x0a, 0xc7,
-	0x7b, 0x15, 0xb1, 0xa0, 0x96, 0x83, 0x87, 0x73, 0x29, 0xa0, 0xf7, 0xe0, 0x4c, 0x6a, 0x31, 0x08,
-	0xd7, 0xe3, 0xf1, 0xfc, 0xe0, 0xe2, 0xf5, 0xac, 0x0a, 0xc2, 0x8b, 0x3f, 0x0b, 0x84, 0xb3, 0x9b,
-	0x40, 0x2f, 0x40, 0xc9, 0x6b, 0x2d, 0x3b, 0x3b, 0x5e, 0x73, 0x9f, 0x45, 0x47, 0x2f, 0xb3, 0x88,
-	0xe1, 0xa5, 0x6a, 0x8d, 0x97, 0x1d, 0x6a, 0xbf, 0xb1, 0xc2, 0xa4, 0xfc, 0xbe, 0x16, 0x03, 0x32,
-	0x9a, 0x9e, 0x4c, 0x8c, 0x6d, 0xb5, 0x40, 0x91, 0x11, 0x36, 0xb0, 0xde, 0x9f, 0x0d, 0xd3, 0xbb,
-	0xb4, 0xb2, 0xc6, 0x00, 0xa2, 0xcf, 0xc3, 0xa8, 0xbe, 0x62, 0xc5, 0x65, 0x76, 0x39, 0x9b, 0x3f,
-	0xd2, 0x56, 0x36, 0x67, 0x1f, 0xd5, 0xea, 0xd5, 0x61, 0xd8, 0xa0, 0x68, 0x13, 0xc8, 0x1e, 0x4b,
-	0x74, 0x13, 0x4a, 0x8d, 0xa6, 0x47, 0xfc, 0xb8, 0x5a, 0xeb, 0x16, 0xbe, 0x68, 0x51, 0xe0, 0x88,
-	0xc9, 0x11, 0x91, 0x9f, 0x79, 0x19, 0x56, 0x14, 0xec, 0x5f, 0x2d, 0xc0, 0x6c, 0x8f, 0x30, 0xe2,
-	0x29, 0x51, 0xbb, 0xd5, 0x97, 0xa8, 0x7d, 0x5e, 0x26, 0x1d, 0x5d, 0x4d, 0xc9, 0x1f, 0x52, 0x09,
-	0x45, 0x13, 0x29, 0x44, 0x1a, 0xbf, 0x6f, 0xd3, 0x67, 0x5d, 0x5a, 0x3f, 0xd0, 0xd3, 0x78, 0xdf,
-	0xd0, 0xd2, 0x0d, 0xf6, 0xff, 0xe8, 0xc9, 0xd5, 0xb8, 0xd8, 0x5f, 0x29, 0xc0, 0x19, 0x35, 0x84,
-	0xdf, 0xb8, 0x03, 0x77, 0xbb, 0x73, 0xe0, 0x8e, 0x41, 0x5f, 0x65, 0xdf, 0x82, 0x21, 0x1e, 0x8f,
-	0xa9, 0x0f, 0x66, 0xeb, 0x71, 0x33, 0x74, 0xa1, 0x62, 0x09, 0x8c, 0xf0, 0x85, 0xdf, 0x6b, 0xc1,
-	0xc4, 0xda, 0x62, 0xad, 0x1e, 0x34, 0xb6, 0x49, 0x3c, 0xcf, 0x99, 0x63, 0x2c, 0x78, 0x2d, 0xeb,
-	0x01, 0x79, 0xa8, 0x2c, 0xee, 0xec, 0x12, 0x0c, 0x6c, 0x05, 0x51, 0x9c, 0x56, 0x66, 0x5f, 0x0f,
-	0xa2, 0x18, 0x33, 0x88, 0xfd, 0x3b, 0x16, 0x0c, 0xb2, 0x34, 0xdb, 0xbd, 0x12, 0xbd, 0xf7, 0xf3,
-	0x5d, 0xe8, 0x45, 0x18, 0x22, 0x1b, 0x1b, 0xa4, 0x11, 0x8b, 0x59, 0x95, 0xde, 0xc7, 0x43, 0x4b,
-	0xac, 0x94, 0x32, 0x18, 0xac, 0x31, 0xfe, 0x17, 0x0b, 0x64, 0x74, 0x17, 0xca, 0xb1, 0xb7, 0x43,
-	0xe6, 0x5d, 0x57, 0xa8, 0x03, 0x1f, 0xc0, 0x83, 0x7a, 0x4d, 0x12, 0xc0, 0x09, 0x2d, 0xfb, 0x4b,
-	0x05, 0x80, 0x24, 0x1a, 0x47, 0xaf, 0x4f, 0x5c, 0xe8, 0x50, 0x14, 0x5d, 0xce, 0x50, 0x14, 0xa1,
-	0x84, 0x60, 0x86, 0x96, 0x48, 0x0d, 0x53, 0xb1, 0xaf, 0x61, 0x1a, 0x38, 0xca, 0x30, 0x2d, 0xc2,
-	0x54, 0x12, 0x4d, 0xc4, 0x0c, 0xa6, 0xc4, 0x1e, 0x44, 0x6b, 0x69, 0x20, 0xee, 0xc4, 0xb7, 0x09,
-	0x5c, 0x52, 0x41, 0x15, 0xc4, 0x5d, 0xc3, 0xac, 0x4d, 0x8f, 0x90, 0xf3, 0x3f, 0xd1, 0x84, 0x15,
-	0x72, 0x35, 0x61, 0x3f, 0x6e, 0xc1, 0xe9, 0x74, 0x3b, 0xcc, 0xfd, 0xef, 0x8b, 0x16, 0x9c, 0x61,
-	0xfa, 0x40, 0xd6, 0x6a, 0xa7, 0xf6, 0xf1, 0x85, 0xae, 0x81, 0x22, 0x72, 0x7a, 0x9c, 0xb8, 0xb9,
-	0xaf, 0x64, 0x91, 0xc6, 0xd9, 0x2d, 0xda, 0xff, 0xbe, 0x00, 0xd3, 0x79, 0x11, 0x26, 0x98, 0x31,
-	0xba, 0x73, 0xaf, 0xbe, 0x4d, 0xf6, 0x84, 0xc9, 0x6f, 0x62, 0x8c, 0xce, 0x8b, 0xb1, 0x84, 0xa7,
-	0x23, 0x43, 0x17, 0xfa, 0x8b, 0x0c, 0x8d, 0xb6, 0x60, 0x6a, 0x6f, 0x8b, 0xf8, 0xb7, 0xfd, 0xc8,
-	0x89, 0xbd, 0x68, 0xc3, 0x63, 0x19, 0xdb, 0xf9, 0xba, 0x79, 0x45, 0x1a, 0xe6, 0xde, 0x4d, 0x23,
-	0x1c, 0x1e, 0xcc, 0x5e, 0x30, 0x0a, 0x92, 0x2e, 0xf3, 0x83, 0x04, 0x77, 0x12, 0xed, 0x0c, 0xac,
-	0x3d, 0xf0, 0x10, 0x03, 0x6b, 0xdb, 0x5f, 0xb4, 0xe0, 0x5c, 0x6e, 0xe2, 0x3b, 0x74, 0x05, 0x4a,
-	0x4e, 0xcb, 0xe3, 0x82, 0x53, 0x71, 0x8c, 0x32, 0x01, 0x40, 0xad, 0xca, 0xc5, 0xa6, 0x0a, 0xaa,
-	0x12, 0xf2, 0x16, 0x72, 0x13, 0xf2, 0xf6, 0xcc, 0xaf, 0x6b, 0x7f, 0x8f, 0x05, 0xc2, 0x91, 0xae,
-	0x8f, 0xb3, 0xfb, 0x2d, 0x99, 0xcf, 0xdc, 0x48, 0xbe, 0x71, 0x29, 0xdf, 0xb3, 0x50, 0xa4, 0xdc,
-	0x50, 0xbc, 0x92, 0x91, 0x68, 0xc3, 0xa0, 0x65, 0xbb, 0x20, 0xa0, 0x15, 0xc2, 0xc4, 0x8e, 0xbd,
-	0x7b, 0xf3, 0x1c, 0x80, 0xcb, 0x70, 0xb5, 0xac, 0xc6, 0xea, 0x66, 0xae, 0x28, 0x08, 0xd6, 0xb0,
-	0xec, 0x7f, 0x5d, 0x80, 0x11, 0x99, 0xec, 0xa1, 0xed, 0xf7, 0x23, 0x1c, 0x38, 0x52, 0xf6, 0x37,
-	0x96, 0x06, 0x9c, 0x12, 0xae, 0x25, 0x32, 0x95, 0x24, 0x0d, 0xb8, 0x04, 0xe0, 0x04, 0x87, 0xee,
-	0xa2, 0xa8, 0xbd, 0xce, 0xd0, 0x53, 0x6e, 0x5f, 0x75, 0x5e, 0x8c, 0x25, 0x1c, 0x7d, 0x1a, 0x26,
-	0x79, 0xbd, 0x30, 0x68, 0x39, 0x9b, 0x5c, 0x22, 0x3d, 0xa8, 0xfc, 0xb5, 0x27, 0x57, 0x52, 0xb0,
-	0xc3, 0x83, 0xd9, 0xd3, 0xe9, 0x32, 0xa6, 0x6a, 0xe9, 0xa0, 0xc2, 0xcc, 0x37, 0x78, 0x23, 0x74,
-	0xf7, 0x77, 0x58, 0x7d, 0x24, 0x20, 0xac, 0xe3, 0xd9, 0x9f, 0x07, 0xd4, 0x99, 0xf6, 0x02, 0xbd,
-	0xc1, 0x6d, 0xf6, 0xbc, 0x90, 0xb8, 0xdd, 0x54, 0x2f, 0xba, 0x57, 0xb2, 0xf4, 0xd8, 0xe0, 0xb5,
-	0xb0, 0xaa, 0x6f, 0xff, 0xff, 0x45, 0x98, 0x4c, 0xfb, 0xa8, 0xa2, 0xeb, 0x30, 0xc4, 0x59, 0x0f,
-	0x41, 0xbe, 0x8b, 0x66, 0x5f, 0xf3, 0x6c, 0x65, 0x87, 0xb0, 0xe0, 0x5e, 0x44, 0x7d, 0xf4, 0x36,
-	0x8c, 0xb8, 0xc1, 0x9e, 0xbf, 0xe7, 0x84, 0xee, 0x7c, 0xad, 0x2a, 0x96, 0x73, 0xe6, 0x6b, 0xa9,
-	0x92, 0xa0, 0xe9, 0xde, 0xb2, 0x4c, 0x8b, 0x95, 0x80, 0xb0, 0x4e, 0x0e, 0xad, 0xb1, 0x28, 0xbd,
-	0x1b, 0xde, 0xe6, 0x8a, 0xd3, 0xea, 0x66, 0xc0, 0xbd, 0x28, 0x91, 0x34, 0xca, 0x63, 0x22, 0x94,
-	0x2f, 0x07, 0xe0, 0x84, 0x10, 0xfa, 0x36, 0x38, 0x15, 0xe5, 0x08, 0x58, 0xf3, 0xb2, 0x20, 0x75,
-	0x93, 0x39, 0x2e, 0x3c, 0x42, 0xdf, 0xb1, 0x59, 0xa2, 0xd8, 0xac, 0x66, 0xec, 0x5f, 0x3b, 0x05,
-	0xc6, 0x26, 0x36, 0x92, 0xe2, 0x59, 0xc7, 0x94, 0x14, 0x0f, 0x43, 0x89, 0xec, 0xb4, 0xe2, 0xfd,
-	0x8a, 0x17, 0x76, 0xcb, 0xaa, 0xba, 0x24, 0x70, 0x3a, 0x69, 0x4a, 0x08, 0x56, 0x74, 0xb2, 0x33,
-	0x17, 0x16, 0xbf, 0x8e, 0x99, 0x0b, 0x07, 0x4e, 0x30, 0x73, 0xe1, 0x2a, 0x0c, 0x6f, 0x7a, 0x31,
-	0x26, 0xad, 0x40, 0x30, 0xfd, 0x99, 0xeb, 0xf0, 0x1a, 0x47, 0xe9, 0xcc, 0x91, 0x25, 0x00, 0x58,
-	0x12, 0x41, 0x6f, 0xa8, 0x1d, 0x38, 0x94, 0xff, 0x66, 0xee, 0x54, 0x41, 0x67, 0xee, 0x41, 0x91,
-	0x9f, 0x70, 0xf8, 0x41, 0xf3, 0x13, 0x2e, 0xcb, 0xac, 0x82, 0xa5, 0x7c, 0x6f, 0x0b, 0x96, 0x34,
-	0xb0, 0x47, 0x2e, 0xc1, 0x3b, 0x7a, 0x26, 0xc6, 0x72, 0xfe, 0x49, 0xa0, 0x92, 0x2c, 0xf6, 0x99,
-	0x7f, 0xf1, 0x7b, 0x2c, 0x38, 0xd3, 0xca, 0x4a, 0x4a, 0x2a, 0xb4, 0xb5, 0x2f, 0xf6, 0x9d, 0x75,
-	0xd5, 0x68, 0x90, 0x09, 0x6a, 0x32, 0xd1, 0x70, 0x76, 0x73, 0x74, 0xa0, 0xc3, 0x75, 0x57, 0x24,
-	0x10, 0x7c, 0x3c, 0x27, 0x91, 0x63, 0x97, 0xf4, 0x8d, 0x6b, 0x19, 0x49, 0x03, 0x3f, 0x9a, 0x97,
-	0x34, 0xb0, 0xef, 0x54, 0x81, 0x6f, 0xa8, 0x14, 0x8e, 0x63, 0xf9, 0x4b, 0x89, 0x27, 0x68, 0xec,
-	0x99, 0xb8, 0xf1, 0x0d, 0x95, 0xb8, 0xb1, 0x4b, 0x1c, 0x49, 0x9e, 0x96, 0xb1, 0x67, 0xba, 0x46,
-	0x2d, 0xe5, 0xe2, 0xc4, 0xf1, 0xa4, 0x5c, 0x34, 0xae, 0x1a, 0x9e, 0xf5, 0xef, 0xa9, 0x1e, 0x57,
-	0x8d, 0x41, 0xb7, 0xfb, 0x65, 0xc3, 0xd3, 0x4b, 0x4e, 0x3d, 0x50, 0x7a, 0xc9, 0x3b, 0x7a, 0xba,
-	0x46, 0xd4, 0x23, 0x1f, 0x21, 0x45, 0xea, 0x33, 0x49, 0xe3, 0x1d, 0xfd, 0x02, 0x3c, 0x95, 0x4f,
-	0x57, 0xdd, 0x73, 0x9d, 0x74, 0x33, 0xaf, 0xc0, 0x8e, 0xe4, 0x8f, 0xa7, 0x4f, 0x26, 0xf9, 0xe3,
-	0x99, 0x63, 0x4f, 0xfe, 0x78, 0xf6, 0x04, 0x92, 0x3f, 0x3e, 0x72, 0x82, 0xc9, 0x1f, 0xef, 0x30,
-	0x13, 0x07, 0x1e, 0x8e, 0x44, 0xc4, 0xbd, 0xcc, 0x8e, 0xb1, 0x98, 0x15, 0xb3, 0x84, 0x7f, 0x9c,
-	0x02, 0xe1, 0x84, 0x54, 0x46, 0x52, 0xc9, 0xe9, 0x87, 0x90, 0x54, 0x72, 0x35, 0x49, 0x2a, 0x79,
-	0x2e, 0x7f, 0xaa, 0x33, 0x4c, 0xcb, 0x73, 0x52, 0x49, 0xde, 0xd1, 0x53, 0x40, 0x3e, 0xda, 0x45,
-	0x14, 0x9f, 0x25, 0x78, 0xec, 0x92, 0xf8, 0xf1, 0x75, 0x9e, 0xf8, 0xf1, 0x7c, 0xfe, 0x49, 0x9e,
-	0xbe, 0xee, 0xcc, 0x74, 0x8f, 0xdf, 0x57, 0x80, 0x8b, 0xdd, 0xf7, 0x45, 0x22, 0xf5, 0xac, 0x25,
-	0x1a, 0xc1, 0x94, 0xd4, 0x93, 0xbf, 0xad, 0x12, 0xac, 0xbe, 0x23, 0x55, 0x5d, 0x83, 0x29, 0x65,
-	0x3b, 0xde, 0xf4, 0x1a, 0xfb, 0x5a, 0x86, 0x7b, 0xe5, 0x6f, 0x5b, 0x4f, 0x23, 0xe0, 0xce, 0x3a,
-	0x68, 0x1e, 0x26, 0x8c, 0xc2, 0x6a, 0x45, 0xbc, 0xa1, 0x94, 0x98, 0xb5, 0x6e, 0x82, 0x71, 0x1a,
-	0xdf, 0xfe, 0x69, 0x0b, 0x1e, 0xc9, 0xc9, 0xab, 0xd4, 0x77, 0x20, 0xa6, 0x0d, 0x98, 0x68, 0x99,
-	0x55, 0x7b, 0xc4, 0x6b, 0x33, 0xb2, 0x37, 0xa9, 0xbe, 0xa6, 0x00, 0x38, 0x4d, 0xd4, 0xfe, 0x53,
-	0x0b, 0x2e, 0x74, 0x35, 0xe3, 0x42, 0x18, 0xce, 0x6e, 0xee, 0x44, 0xce, 0x62, 0x48, 0x5c, 0xe2,
-	0xc7, 0x9e, 0xd3, 0xac, 0xb7, 0x48, 0x43, 0x93, 0x5b, 0x33, 0x7b, 0xa8, 0x6b, 0x2b, 0xf5, 0xf9,
-	0x4e, 0x0c, 0x9c, 0x53, 0x13, 0x2d, 0x03, 0xea, 0x84, 0x88, 0x19, 0x66, 0x31, 0x5d, 0x3b, 0xe9,
-	0xe1, 0x8c, 0x1a, 0xe8, 0x25, 0x18, 0x53, 0xe6, 0x61, 0xda, 0x8c, 0xb3, 0x03, 0x18, 0xeb, 0x00,
-	0x6c, 0xe2, 0x2d, 0x5c, 0xf9, 0x8d, 0xdf, 0xbb, 0xf8, 0x91, 0xdf, 0xfa, 0xbd, 0x8b, 0x1f, 0xf9,
-	0xed, 0xdf, 0xbb, 0xf8, 0x91, 0xef, 0xb8, 0x7f, 0xd1, 0xfa, 0x8d, 0xfb, 0x17, 0xad, 0xdf, 0xba,
-	0x7f, 0xd1, 0xfa, 0xed, 0xfb, 0x17, 0xad, 0xdf, 0xbd, 0x7f, 0xd1, 0xfa, 0xd2, 0xef, 0x5f, 0xfc,
-	0xc8, 0x5b, 0x85, 0xdd, 0x67, 0xff, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x5e, 0x40, 0x10, 0x5c,
-	0xb3, 0xfc, 0x00, 0x00,
+	// 13727 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x7b, 0x70, 0x24, 0x49,
+	0x5a, 0x18, 0x7e, 0xd5, 0xad, 0x47, 0xf7, 0xa7, 0x77, 0xce, 0x63, 0x35, 0xda, 0x99, 0xd1, 0x6c,
+	0xed, 0xdd, 0xec, 0xec, 0xed, 0xae, 0xe6, 0xf6, 0x75, 0xbb, 0xdc, 0xde, 0x2d, 0x48, 0x6a, 0x69,
+	0xa6, 0x77, 0x46, 0x9a, 0xde, 0x6c, 0xcd, 0xcc, 0xdd, 0xb1, 0x77, 0xbf, 0x2b, 0x75, 0xa5, 0xa4,
+	0x3a, 0x75, 0x57, 0xf5, 0x56, 0x55, 0x6b, 0x46, 0xfb, 0x83, 0x30, 0x3e, 0x9e, 0x67, 0xc0, 0x71,
+	0x76, 0x10, 0x7e, 0x00, 0x41, 0x38, 0x30, 0x0e, 0xc0, 0xd8, 0x0e, 0x63, 0x30, 0x60, 0x0e, 0x1b,
+	0x0c, 0xb6, 0x03, 0xfb, 0x0f, 0x8c, 0x09, 0xdb, 0x47, 0x04, 0x61, 0x19, 0x06, 0x87, 0x89, 0xfb,
+	0xc3, 0x40, 0x18, 0xfc, 0x87, 0x65, 0xc2, 0x38, 0xf2, 0x59, 0x99, 0xd5, 0x55, 0xdd, 0xad, 0x59,
+	0x8d, 0x6e, 0xb9, 0xd8, 0xff, 0xba, 0xf3, 0xfb, 0xf2, 0xcb, 0xac, 0x7c, 0x7c, 0xf9, 0xe5, 0x97,
+	0xdf, 0x03, 0x5e, 0xdb, 0x7d, 0x35, 0x5a, 0xf0, 0x82, 0xab, 0xbb, 0x9d, 0x4d, 0x12, 0xfa, 0x24,
+	0x26, 0xd1, 0xd5, 0x3d, 0xe2, 0xbb, 0x41, 0x78, 0x55, 0x00, 0x9c, 0xb6, 0x77, 0xb5, 0x11, 0x84,
+	0xe4, 0xea, 0xde, 0xf3, 0x57, 0xb7, 0x89, 0x4f, 0x42, 0x27, 0x26, 0xee, 0x42, 0x3b, 0x0c, 0xe2,
+	0x00, 0x21, 0x8e, 0xb3, 0xe0, 0xb4, 0xbd, 0x05, 0x8a, 0xb3, 0xb0, 0xf7, 0xfc, 0xdc, 0x73, 0xdb,
+	0x5e, 0xbc, 0xd3, 0xd9, 0x5c, 0x68, 0x04, 0xad, 0xab, 0xdb, 0xc1, 0x76, 0x70, 0x95, 0xa1, 0x6e,
+	0x76, 0xb6, 0xd8, 0x3f, 0xf6, 0x87, 0xfd, 0xe2, 0x24, 0xe6, 0x5e, 0x4a, 0x9a, 0x69, 0x39, 0x8d,
+	0x1d, 0xcf, 0x27, 0xe1, 0xfe, 0xd5, 0xf6, 0xee, 0x36, 0x6b, 0x37, 0x24, 0x51, 0xd0, 0x09, 0x1b,
+	0x24, 0xdd, 0x70, 0xcf, 0x5a, 0xd1, 0xd5, 0x16, 0x89, 0x9d, 0x8c, 0xee, 0xce, 0x5d, 0xcd, 0xab,
+	0x15, 0x76, 0xfc, 0xd8, 0x6b, 0x75, 0x37, 0xf3, 0xd1, 0x7e, 0x15, 0xa2, 0xc6, 0x0e, 0x69, 0x39,
+	0x5d, 0xf5, 0x5e, 0xcc, 0xab, 0xd7, 0x89, 0xbd, 0xe6, 0x55, 0xcf, 0x8f, 0xa3, 0x38, 0x4c, 0x57,
+	0xb2, 0xbf, 0x62, 0xc1, 0xa5, 0xc5, 0xbb, 0xf5, 0x95, 0xa6, 0x13, 0xc5, 0x5e, 0x63, 0xa9, 0x19,
+	0x34, 0x76, 0xeb, 0x71, 0x10, 0x92, 0x3b, 0x41, 0xb3, 0xd3, 0x22, 0x75, 0x36, 0x10, 0xe8, 0x59,
+	0x28, 0xed, 0xb1, 0xff, 0xd5, 0xca, 0xac, 0x75, 0xc9, 0xba, 0x52, 0x5e, 0x9a, 0xfe, 0xf5, 0x83,
+	0xf9, 0x0f, 0x3c, 0x38, 0x98, 0x2f, 0xdd, 0x11, 0xe5, 0x58, 0x61, 0xa0, 0xcb, 0x30, 0xb2, 0x15,
+	0x6d, 0xec, 0xb7, 0xc9, 0x6c, 0x81, 0xe1, 0x4e, 0x0a, 0xdc, 0x91, 0xd5, 0x3a, 0x2d, 0xc5, 0x02,
+	0x8a, 0xae, 0x42, 0xb9, 0xed, 0x84, 0xb1, 0x17, 0x7b, 0x81, 0x3f, 0x5b, 0xbc, 0x64, 0x5d, 0x19,
+	0x5e, 0x9a, 0x11, 0xa8, 0xe5, 0x9a, 0x04, 0xe0, 0x04, 0x87, 0x76, 0x23, 0x24, 0x8e, 0x7b, 0xcb,
+	0x6f, 0xee, 0xcf, 0x0e, 0x5d, 0xb2, 0xae, 0x94, 0x92, 0x6e, 0x60, 0x51, 0x8e, 0x15, 0x86, 0xfd,
+	0x83, 0x05, 0x28, 0x2d, 0x6e, 0x6d, 0x79, 0xbe, 0x17, 0xef, 0xa3, 0x3b, 0x30, 0xee, 0x07, 0x2e,
+	0x91, 0xff, 0xd9, 0x57, 0x8c, 0xbd, 0x70, 0x69, 0xa1, 0x7b, 0x29, 0x2d, 0xac, 0x6b, 0x78, 0x4b,
+	0xd3, 0x0f, 0x0e, 0xe6, 0xc7, 0xf5, 0x12, 0x6c, 0xd0, 0x41, 0x18, 0xc6, 0xda, 0x81, 0xab, 0xc8,
+	0x16, 0x18, 0xd9, 0xf9, 0x2c, 0xb2, 0xb5, 0x04, 0x6d, 0x69, 0xea, 0xc1, 0xc1, 0xfc, 0x98, 0x56,
+	0x80, 0x75, 0x22, 0x68, 0x13, 0xa6, 0xe8, 0x5f, 0x3f, 0xf6, 0x14, 0xdd, 0x22, 0xa3, 0xfb, 0x64,
+	0x1e, 0x5d, 0x0d, 0x75, 0xe9, 0xd4, 0x83, 0x83, 0xf9, 0xa9, 0x54, 0x21, 0x4e, 0x13, 0xb4, 0xdf,
+	0x81, 0xc9, 0xc5, 0x38, 0x76, 0x1a, 0x3b, 0xc4, 0xe5, 0x33, 0x88, 0x5e, 0x82, 0x21, 0xdf, 0x69,
+	0x11, 0x31, 0xbf, 0x97, 0xc4, 0xc0, 0x0e, 0xad, 0x3b, 0x2d, 0x72, 0x78, 0x30, 0x3f, 0x7d, 0xdb,
+	0xf7, 0xde, 0xee, 0x88, 0x55, 0x41, 0xcb, 0x30, 0xc3, 0x46, 0x2f, 0x00, 0xb8, 0x64, 0xcf, 0x6b,
+	0x90, 0x9a, 0x13, 0xef, 0x88, 0xf9, 0x46, 0xa2, 0x2e, 0x54, 0x14, 0x04, 0x6b, 0x58, 0xf6, 0x7d,
+	0x28, 0x2f, 0xee, 0x05, 0x9e, 0x5b, 0x0b, 0xdc, 0x08, 0xed, 0xc2, 0x54, 0x3b, 0x24, 0x5b, 0x24,
+	0x54, 0x45, 0xb3, 0xd6, 0xa5, 0xe2, 0x95, 0xb1, 0x17, 0xae, 0x64, 0x7e, 0xac, 0x89, 0xba, 0xe2,
+	0xc7, 0xe1, 0xfe, 0xd2, 0x63, 0xa2, 0xbd, 0xa9, 0x14, 0x14, 0xa7, 0x29, 0xdb, 0xff, 0xaa, 0x00,
+	0x67, 0x16, 0xdf, 0xe9, 0x84, 0xa4, 0xe2, 0x45, 0xbb, 0xe9, 0x15, 0xee, 0x7a, 0xd1, 0xee, 0x7a,
+	0x32, 0x02, 0x6a, 0x69, 0x55, 0x44, 0x39, 0x56, 0x18, 0xe8, 0x39, 0x18, 0xa5, 0xbf, 0x6f, 0xe3,
+	0xaa, 0xf8, 0xe4, 0x53, 0x02, 0x79, 0xac, 0xe2, 0xc4, 0x4e, 0x85, 0x83, 0xb0, 0xc4, 0x41, 0x6b,
+	0x30, 0xd6, 0x60, 0x1b, 0x72, 0x7b, 0x2d, 0x70, 0x09, 0x9b, 0xcc, 0xf2, 0xd2, 0x33, 0x14, 0x7d,
+	0x39, 0x29, 0x3e, 0x3c, 0x98, 0x9f, 0xe5, 0x7d, 0x13, 0x24, 0x34, 0x18, 0xd6, 0xeb, 0x23, 0x5b,
+	0xed, 0xaf, 0x21, 0x46, 0x09, 0x32, 0xf6, 0xd6, 0x15, 0x6d, 0xab, 0x0c, 0xb3, 0xad, 0x32, 0x9e,
+	0xbd, 0x4d, 0xd0, 0xf3, 0x30, 0xb4, 0xeb, 0xf9, 0xee, 0xec, 0x08, 0xa3, 0x75, 0x81, 0xce, 0xf9,
+	0x0d, 0xcf, 0x77, 0x0f, 0x0f, 0xe6, 0x67, 0x8c, 0xee, 0xd0, 0x42, 0xcc, 0x50, 0xed, 0x3f, 0xb1,
+	0x60, 0x9e, 0xc1, 0x56, 0xbd, 0x26, 0xa9, 0x91, 0x30, 0xf2, 0xa2, 0x98, 0xf8, 0xb1, 0x31, 0xa0,
+	0x2f, 0x00, 0x44, 0xa4, 0x11, 0x92, 0x58, 0x1b, 0x52, 0xb5, 0x30, 0xea, 0x0a, 0x82, 0x35, 0x2c,
+	0xca, 0x10, 0xa2, 0x1d, 0x27, 0x64, 0xeb, 0x4b, 0x0c, 0xac, 0x62, 0x08, 0x75, 0x09, 0xc0, 0x09,
+	0x8e, 0xc1, 0x10, 0x8a, 0xfd, 0x18, 0x02, 0xfa, 0x04, 0x4c, 0x25, 0x8d, 0x45, 0x6d, 0xa7, 0x21,
+	0x07, 0x90, 0x6d, 0x99, 0xba, 0x09, 0xc2, 0x69, 0x5c, 0xfb, 0xef, 0x5b, 0x62, 0xf1, 0xd0, 0xaf,
+	0x7e, 0x8f, 0x7f, 0xab, 0xfd, 0x0b, 0x16, 0x8c, 0x2e, 0x79, 0xbe, 0xeb, 0xf9, 0xdb, 0xe8, 0x73,
+	0x50, 0xa2, 0x67, 0x93, 0xeb, 0xc4, 0x8e, 0xe0, 0x7b, 0x1f, 0xd1, 0xf6, 0x96, 0x3a, 0x2a, 0x16,
+	0xda, 0xbb, 0xdb, 0xb4, 0x20, 0x5a, 0xa0, 0xd8, 0x74, 0xb7, 0xdd, 0xda, 0xfc, 0x3c, 0x69, 0xc4,
+	0x6b, 0x24, 0x76, 0x92, 0xcf, 0x49, 0xca, 0xb0, 0xa2, 0x8a, 0x6e, 0xc0, 0x48, 0xec, 0x84, 0xdb,
+	0x24, 0x16, 0x0c, 0x30, 0x93, 0x51, 0xf1, 0x9a, 0x98, 0xee, 0x48, 0xe2, 0x37, 0x48, 0x72, 0x2c,
+	0x6c, 0xb0, 0xaa, 0x58, 0x90, 0xb0, 0xbf, 0x7f, 0x14, 0xce, 0x2d, 0xd7, 0xab, 0x39, 0xeb, 0xea,
+	0x32, 0x8c, 0xb8, 0xa1, 0xb7, 0x47, 0x42, 0x31, 0xce, 0x8a, 0x4a, 0x85, 0x95, 0x62, 0x01, 0x45,
+	0xaf, 0xc2, 0x38, 0x3f, 0x90, 0xae, 0x3b, 0xbe, 0xdb, 0x94, 0x43, 0x7c, 0x5a, 0x60, 0x8f, 0xdf,
+	0xd1, 0x60, 0xd8, 0xc0, 0x3c, 0xe2, 0xa2, 0xba, 0x9c, 0xda, 0x8c, 0x79, 0x87, 0xdd, 0x17, 0x2d,
+	0x98, 0xe6, 0xcd, 0x2c, 0xc6, 0x71, 0xe8, 0x6d, 0x76, 0x62, 0x12, 0xcd, 0x0e, 0x33, 0x4e, 0xb7,
+	0x9c, 0x35, 0x5a, 0xb9, 0x23, 0xb0, 0x70, 0x27, 0x45, 0x85, 0x33, 0xc1, 0x59, 0xd1, 0xee, 0x74,
+	0x1a, 0x8c, 0xbb, 0x9a, 0x45, 0xdf, 0x6e, 0xc1, 0x5c, 0x23, 0xf0, 0xe3, 0x30, 0x68, 0x36, 0x49,
+	0x58, 0xeb, 0x6c, 0x36, 0xbd, 0x68, 0x87, 0xaf, 0x53, 0x4c, 0xb6, 0x18, 0x27, 0xc8, 0x99, 0x43,
+	0x85, 0x24, 0xe6, 0xf0, 0xe2, 0x83, 0x83, 0xf9, 0xb9, 0xe5, 0x5c, 0x52, 0xb8, 0x47, 0x33, 0x68,
+	0x17, 0x10, 0x3d, 0x4a, 0xeb, 0xb1, 0xb3, 0x4d, 0x92, 0xc6, 0x47, 0x07, 0x6f, 0xfc, 0xec, 0x83,
+	0x83, 0x79, 0xb4, 0xde, 0x45, 0x02, 0x67, 0x90, 0x45, 0x6f, 0xc3, 0x69, 0x5a, 0xda, 0xf5, 0xad,
+	0xa5, 0xc1, 0x9b, 0x9b, 0x7d, 0x70, 0x30, 0x7f, 0x7a, 0x3d, 0x83, 0x08, 0xce, 0x24, 0x8d, 0xbe,
+	0xcd, 0x82, 0x73, 0xc9, 0xe7, 0xaf, 0xdc, 0x6f, 0x3b, 0xbe, 0x9b, 0x34, 0x5c, 0x1e, 0xbc, 0x61,
+	0xca, 0x93, 0xcf, 0x2d, 0xe7, 0x51, 0xc2, 0xf9, 0x8d, 0xcc, 0x2d, 0xc3, 0x99, 0xcc, 0xd5, 0x82,
+	0xa6, 0xa1, 0xb8, 0x4b, 0xb8, 0x14, 0x54, 0xc6, 0xf4, 0x27, 0x3a, 0x0d, 0xc3, 0x7b, 0x4e, 0xb3,
+	0x23, 0x36, 0x0a, 0xe6, 0x7f, 0x3e, 0x56, 0x78, 0xd5, 0xb2, 0xff, 0x75, 0x11, 0xa6, 0x96, 0xeb,
+	0xd5, 0x87, 0xda, 0x85, 0xfa, 0x31, 0x54, 0xe8, 0x79, 0x0c, 0x25, 0x87, 0x5a, 0x31, 0xf7, 0x50,
+	0xfb, 0x4b, 0x19, 0x5b, 0x68, 0x88, 0x6d, 0xa1, 0x6f, 0xc8, 0xd9, 0x42, 0xc7, 0xbc, 0x71, 0xf6,
+	0x72, 0x56, 0xd1, 0x30, 0x9b, 0xcc, 0x4c, 0x89, 0xe5, 0x66, 0xd0, 0x70, 0x9a, 0x69, 0xd6, 0x77,
+	0xc4, 0xa5, 0x74, 0x3c, 0xf3, 0xd8, 0x80, 0xf1, 0x65, 0xa7, 0xed, 0x6c, 0x7a, 0x4d, 0x2f, 0xf6,
+	0x48, 0x84, 0x9e, 0x82, 0xa2, 0xe3, 0xba, 0x4c, 0xda, 0x2a, 0x2f, 0x9d, 0x79, 0x70, 0x30, 0x5f,
+	0x5c, 0x74, 0xe9, 0xb1, 0x0f, 0x0a, 0x6b, 0x1f, 0x53, 0x0c, 0xf4, 0x61, 0x18, 0x72, 0xc3, 0xa0,
+	0x3d, 0x5b, 0x60, 0x98, 0x74, 0xd7, 0x0d, 0x55, 0xc2, 0xa0, 0x9d, 0x42, 0x65, 0x38, 0xf6, 0xaf,
+	0x14, 0xe0, 0xfc, 0x32, 0x69, 0xef, 0xac, 0xd6, 0x73, 0xf8, 0xf7, 0x15, 0x28, 0xb5, 0x02, 0xdf,
+	0x8b, 0x83, 0x30, 0x12, 0x4d, 0xb3, 0x15, 0xb1, 0x26, 0xca, 0xb0, 0x82, 0xa2, 0x4b, 0x30, 0xd4,
+	0x4e, 0x84, 0xca, 0x71, 0x29, 0x90, 0x32, 0x71, 0x92, 0x41, 0x28, 0x46, 0x27, 0x22, 0xa1, 0x58,
+	0x31, 0x0a, 0xe3, 0x76, 0x44, 0x42, 0xcc, 0x20, 0xc9, 0xc9, 0x4c, 0xcf, 0x6c, 0xc1, 0xa1, 0x53,
+	0x27, 0x33, 0x85, 0x60, 0x0d, 0x0b, 0xd5, 0xa0, 0x1c, 0xa5, 0x66, 0x76, 0xa0, 0x6d, 0x3a, 0xc1,
+	0x8e, 0x6e, 0x35, 0x93, 0x09, 0x11, 0xe3, 0x44, 0x19, 0xe9, 0x7b, 0x74, 0x7f, 0xb9, 0x00, 0x88,
+	0x0f, 0xe1, 0x5f, 0xb0, 0x81, 0xbb, 0xdd, 0x3d, 0x70, 0x83, 0x6f, 0x89, 0xe3, 0x1a, 0xbd, 0x3f,
+	0xb5, 0xe0, 0xfc, 0xb2, 0xe7, 0xbb, 0x24, 0xcc, 0x59, 0x80, 0x8f, 0xe6, 0x2e, 0x7b, 0x34, 0xa1,
+	0xc1, 0x58, 0x62, 0x43, 0xc7, 0xb0, 0xc4, 0xec, 0x3f, 0xb2, 0x00, 0xf1, 0xcf, 0x7e, 0xcf, 0x7d,
+	0xec, 0xed, 0xee, 0x8f, 0x3d, 0x86, 0x65, 0x61, 0xdf, 0x84, 0xc9, 0xe5, 0xa6, 0x47, 0xfc, 0xb8,
+	0x5a, 0x5b, 0x0e, 0xfc, 0x2d, 0x6f, 0x1b, 0x7d, 0x0c, 0x26, 0x63, 0xaf, 0x45, 0x82, 0x4e, 0x5c,
+	0x27, 0x8d, 0xc0, 0x67, 0x37, 0x49, 0xeb, 0xca, 0xf0, 0x12, 0x7a, 0x70, 0x30, 0x3f, 0xb9, 0x61,
+	0x40, 0x70, 0x0a, 0xd3, 0xfe, 0x1d, 0x3a, 0x7e, 0x41, 0xab, 0x1d, 0xf8, 0xc4, 0x8f, 0x97, 0x03,
+	0xdf, 0xe5, 0x1a, 0x87, 0x8f, 0xc1, 0x50, 0x4c, 0xc7, 0x83, 0x8f, 0xdd, 0x65, 0xb9, 0x51, 0xe8,
+	0x28, 0x1c, 0x1e, 0xcc, 0x9f, 0xed, 0xae, 0xc1, 0xc6, 0x89, 0xd5, 0x41, 0xdf, 0x00, 0x23, 0x51,
+	0xec, 0xc4, 0x9d, 0x48, 0x8c, 0xe6, 0x13, 0x72, 0x34, 0xeb, 0xac, 0xf4, 0xf0, 0x60, 0x7e, 0x4a,
+	0x55, 0xe3, 0x45, 0x58, 0x54, 0x40, 0x4f, 0xc3, 0x68, 0x8b, 0x44, 0x91, 0xb3, 0x2d, 0x4f, 0xc3,
+	0x29, 0x51, 0x77, 0x74, 0x8d, 0x17, 0x63, 0x09, 0x47, 0x4f, 0xc2, 0x30, 0x09, 0xc3, 0x20, 0x14,
+	0x7b, 0x74, 0x42, 0x20, 0x0e, 0xaf, 0xd0, 0x42, 0xcc, 0x61, 0xf6, 0xbf, 0xb7, 0x60, 0x4a, 0xf5,
+	0x95, 0xb7, 0x75, 0x02, 0xb7, 0x82, 0x4f, 0x03, 0x34, 0xe4, 0x07, 0x46, 0xec, 0xf4, 0x18, 0x7b,
+	0xe1, 0x72, 0xe6, 0x41, 0xdd, 0x35, 0x8c, 0x09, 0x65, 0x55, 0x14, 0x61, 0x8d, 0x9a, 0xfd, 0xcf,
+	0x2d, 0x38, 0x95, 0xfa, 0xa2, 0x9b, 0x5e, 0x14, 0xa3, 0xb7, 0xba, 0xbe, 0x6a, 0x61, 0xb0, 0xaf,
+	0xa2, 0xb5, 0xd9, 0x37, 0xa9, 0xa5, 0x2c, 0x4b, 0xb4, 0x2f, 0xba, 0x0e, 0xc3, 0x5e, 0x4c, 0x5a,
+	0xf2, 0x63, 0x9e, 0xec, 0xf9, 0x31, 0xbc, 0x57, 0xc9, 0x8c, 0x54, 0x69, 0x4d, 0xcc, 0x09, 0xd8,
+	0xbf, 0x52, 0x84, 0x32, 0x5f, 0xb6, 0x6b, 0x4e, 0xfb, 0x04, 0xe6, 0xe2, 0x19, 0x28, 0x7b, 0xad,
+	0x56, 0x27, 0x76, 0x36, 0x05, 0x3b, 0x2f, 0xf1, 0xad, 0x55, 0x95, 0x85, 0x38, 0x81, 0xa3, 0x2a,
+	0x0c, 0xb1, 0xae, 0xf0, 0xaf, 0x7c, 0x2a, 0xfb, 0x2b, 0x45, 0xdf, 0x17, 0x2a, 0x4e, 0xec, 0x70,
+	0x49, 0x4a, 0x9d, 0x23, 0xb4, 0x08, 0x33, 0x12, 0xc8, 0x01, 0xd8, 0xf4, 0x7c, 0x27, 0xdc, 0xa7,
+	0x65, 0xb3, 0x45, 0x46, 0xf0, 0xb9, 0xde, 0x04, 0x97, 0x14, 0x3e, 0x27, 0xab, 0x3e, 0x2c, 0x01,
+	0x60, 0x8d, 0xe8, 0xdc, 0x2b, 0x50, 0x56, 0xc8, 0x47, 0x11, 0x88, 0xe6, 0x3e, 0x01, 0x53, 0xa9,
+	0xb6, 0xfa, 0x55, 0x1f, 0xd7, 0xe5, 0xa9, 0x5f, 0x64, 0x2c, 0x43, 0xf4, 0x7a, 0xc5, 0xdf, 0x13,
+	0x2c, 0xf7, 0x1d, 0x38, 0xdd, 0xcc, 0xe0, 0x64, 0x62, 0x5e, 0x07, 0xe7, 0x7c, 0xe7, 0xc5, 0x67,
+	0x9f, 0xce, 0x82, 0xe2, 0xcc, 0x36, 0xa8, 0x8c, 0x10, 0xb4, 0xe9, 0x06, 0x71, 0x9a, 0xba, 0xb8,
+	0x7d, 0x4b, 0x94, 0x61, 0x05, 0xa5, 0xfc, 0xee, 0xb4, 0xea, 0xfc, 0x0d, 0xb2, 0x5f, 0x27, 0x4d,
+	0xd2, 0x88, 0x83, 0xf0, 0x6b, 0xda, 0xfd, 0x0b, 0x7c, 0xf4, 0x39, 0xbb, 0x1c, 0x13, 0x04, 0x8a,
+	0x37, 0xc8, 0x3e, 0x9f, 0x0a, 0xfd, 0xeb, 0x8a, 0x3d, 0xbf, 0xee, 0xa7, 0x2d, 0x98, 0x50, 0x5f,
+	0x77, 0x02, 0x7c, 0x61, 0xc9, 0xe4, 0x0b, 0x17, 0x7a, 0x2e, 0xf0, 0x1c, 0x8e, 0xf0, 0xe5, 0x02,
+	0x9c, 0x53, 0x38, 0xf4, 0x6e, 0xc0, 0xff, 0x88, 0x55, 0x75, 0x15, 0xca, 0xbe, 0xd2, 0x5a, 0x59,
+	0xa6, 0xba, 0x28, 0xd1, 0x59, 0x25, 0x38, 0x54, 0xc4, 0xf3, 0x13, 0xd5, 0xd2, 0xb8, 0xae, 0xce,
+	0x15, 0xaa, 0xdb, 0x25, 0x28, 0x76, 0x3c, 0x57, 0x1c, 0x30, 0x1f, 0x91, 0xa3, 0x7d, 0xbb, 0x5a,
+	0x39, 0x3c, 0x98, 0x7f, 0x22, 0xef, 0x29, 0x81, 0x9e, 0x6c, 0xd1, 0xc2, 0xed, 0x6a, 0x05, 0xd3,
+	0xca, 0x68, 0x11, 0xa6, 0xe4, 0x6b, 0xc9, 0x1d, 0x2a, 0x6e, 0x05, 0xbe, 0x38, 0x87, 0x94, 0x4e,
+	0x16, 0x9b, 0x60, 0x9c, 0xc6, 0x47, 0x15, 0x98, 0xde, 0xed, 0x6c, 0x92, 0x26, 0x89, 0xf9, 0x07,
+	0xdf, 0x20, 0x5c, 0x63, 0x59, 0x4e, 0x6e, 0x66, 0x37, 0x52, 0x70, 0xdc, 0x55, 0xc3, 0xfe, 0x73,
+	0x76, 0x1e, 0x88, 0xd1, 0xab, 0x85, 0x01, 0x5d, 0x58, 0x94, 0xfa, 0xd7, 0x72, 0x39, 0x0f, 0xb2,
+	0x2a, 0x6e, 0x90, 0xfd, 0x8d, 0x80, 0x4a, 0xe6, 0xd9, 0xab, 0xc2, 0x58, 0xf3, 0x43, 0x3d, 0xd7,
+	0xfc, 0xcf, 0x16, 0xe0, 0x8c, 0x1a, 0x01, 0x43, 0x08, 0xfc, 0x8b, 0x3e, 0x06, 0xcf, 0xc3, 0x98,
+	0x4b, 0xb6, 0x9c, 0x4e, 0x33, 0x56, 0xea, 0xf3, 0x61, 0xfe, 0x84, 0x52, 0x49, 0x8a, 0xb1, 0x8e,
+	0x73, 0x84, 0x61, 0xfb, 0x5f, 0x63, 0xec, 0x20, 0x8e, 0x1d, 0xba, 0xc6, 0xd5, 0xae, 0xb1, 0x72,
+	0x77, 0xcd, 0x93, 0x30, 0xec, 0xb5, 0xa8, 0x60, 0x56, 0x30, 0xe5, 0xad, 0x2a, 0x2d, 0xc4, 0x1c,
+	0x86, 0x3e, 0x04, 0xa3, 0x8d, 0xa0, 0xd5, 0x72, 0x7c, 0x97, 0x1d, 0x79, 0xe5, 0xa5, 0x31, 0x2a,
+	0xbb, 0x2d, 0xf3, 0x22, 0x2c, 0x61, 0xe8, 0x3c, 0x0c, 0x39, 0xe1, 0x36, 0xd7, 0x61, 0x94, 0x97,
+	0x4a, 0xb4, 0xa5, 0xc5, 0x70, 0x3b, 0xc2, 0xac, 0x94, 0x5e, 0xc1, 0xee, 0x05, 0xe1, 0xae, 0xe7,
+	0x6f, 0x57, 0xbc, 0x50, 0x6c, 0x09, 0x75, 0x16, 0xde, 0x55, 0x10, 0xac, 0x61, 0xa1, 0x55, 0x18,
+	0x6e, 0x07, 0x61, 0x1c, 0xcd, 0x8e, 0xb0, 0xe1, 0x7e, 0x22, 0x87, 0x11, 0xf1, 0xaf, 0xad, 0x05,
+	0x61, 0x9c, 0x7c, 0x00, 0xfd, 0x17, 0x61, 0x5e, 0x1d, 0xdd, 0x84, 0x51, 0xe2, 0xef, 0xad, 0x86,
+	0x41, 0x6b, 0xf6, 0x54, 0x3e, 0xa5, 0x15, 0x8e, 0xc2, 0x97, 0x59, 0x22, 0xa3, 0x8a, 0x62, 0x2c,
+	0x49, 0xa0, 0x6f, 0x80, 0x22, 0xf1, 0xf7, 0x66, 0x47, 0x19, 0xa5, 0xb9, 0x1c, 0x4a, 0x77, 0x9c,
+	0x30, 0xe1, 0xf9, 0x2b, 0xfe, 0x1e, 0xa6, 0x75, 0xd0, 0xa7, 0xa0, 0x2c, 0x19, 0x46, 0x24, 0x94,
+	0x75, 0x99, 0x0b, 0x56, 0xb2, 0x19, 0x4c, 0xde, 0xee, 0x78, 0x21, 0x69, 0x11, 0x3f, 0x8e, 0x12,
+	0x0e, 0x29, 0xa1, 0x11, 0x4e, 0xa8, 0xa1, 0x4f, 0x49, 0x0d, 0xf1, 0x5a, 0xd0, 0xf1, 0xe3, 0x68,
+	0xb6, 0xcc, 0xba, 0x97, 0xf9, 0x76, 0x77, 0x27, 0xc1, 0x4b, 0xab, 0x90, 0x79, 0x65, 0x6c, 0x90,
+	0x42, 0x9f, 0x81, 0x09, 0xfe, 0x9f, 0xbf, 0x80, 0x45, 0xb3, 0x67, 0x18, 0xed, 0x4b, 0xf9, 0xb4,
+	0x39, 0xe2, 0xd2, 0x19, 0x41, 0x7c, 0x42, 0x2f, 0x8d, 0xb0, 0x49, 0x0d, 0x61, 0x98, 0x68, 0x7a,
+	0x7b, 0xc4, 0x27, 0x51, 0x54, 0x0b, 0x83, 0x4d, 0x32, 0x0b, 0x6c, 0x60, 0xce, 0x65, 0xbf, 0x98,
+	0x05, 0x9b, 0x64, 0x69, 0x86, 0xd2, 0xbc, 0xa9, 0xd7, 0xc1, 0x26, 0x09, 0x74, 0x1b, 0x26, 0xe9,
+	0x8d, 0xcd, 0x4b, 0x88, 0x8e, 0xf5, 0x23, 0xca, 0xee, 0x55, 0xd8, 0xa8, 0x84, 0x53, 0x44, 0xd0,
+	0x2d, 0x18, 0x8f, 0x62, 0x27, 0x8c, 0x3b, 0x6d, 0x4e, 0xf4, 0x6c, 0x3f, 0xa2, 0xec, 0xc1, 0xb5,
+	0xae, 0x55, 0xc1, 0x06, 0x01, 0xf4, 0x06, 0x94, 0x9b, 0xde, 0x16, 0x69, 0xec, 0x37, 0x9a, 0x64,
+	0x76, 0x9c, 0x51, 0xcb, 0x64, 0x2a, 0x37, 0x25, 0x12, 0x97, 0x73, 0xd5, 0x5f, 0x9c, 0x54, 0x47,
+	0x77, 0xe0, 0x6c, 0x4c, 0xc2, 0x96, 0xe7, 0x3b, 0x94, 0x19, 0x88, 0xab, 0x15, 0x7b, 0xc8, 0x9c,
+	0x60, 0xbb, 0xed, 0xa2, 0x98, 0x8d, 0xb3, 0x1b, 0x99, 0x58, 0x38, 0xa7, 0x36, 0xba, 0x0f, 0xb3,
+	0x19, 0x90, 0xa0, 0xe9, 0x35, 0xf6, 0x67, 0x4f, 0x33, 0xca, 0x1f, 0x17, 0x94, 0x67, 0x37, 0x72,
+	0xf0, 0x0e, 0x7b, 0xc0, 0x70, 0x2e, 0x75, 0x74, 0x0b, 0xa6, 0x18, 0x07, 0xaa, 0x75, 0x9a, 0x4d,
+	0xd1, 0xe0, 0x24, 0x6b, 0xf0, 0x43, 0xf2, 0x3c, 0xae, 0x9a, 0xe0, 0xc3, 0x83, 0x79, 0x48, 0xfe,
+	0xe1, 0x74, 0x6d, 0xb4, 0xc9, 0xde, 0xcc, 0x3a, 0xa1, 0x17, 0xef, 0x53, 0xbe, 0x41, 0xee, 0xc7,
+	0xb3, 0x53, 0x3d, 0xf5, 0x15, 0x3a, 0xaa, 0x7a, 0x58, 0xd3, 0x0b, 0x71, 0x9a, 0x20, 0x65, 0xa9,
+	0x51, 0xec, 0x7a, 0xfe, 0xec, 0x34, 0xbf, 0x97, 0x48, 0x8e, 0x54, 0xa7, 0x85, 0x98, 0xc3, 0xd8,
+	0x7b, 0x19, 0xfd, 0x71, 0x8b, 0x9e, 0x5c, 0x33, 0x0c, 0x31, 0x79, 0x2f, 0x93, 0x00, 0x9c, 0xe0,
+	0x50, 0x61, 0x32, 0x8e, 0xf7, 0x67, 0x11, 0x43, 0x55, 0x8c, 0x65, 0x63, 0xe3, 0x53, 0x98, 0x96,
+	0xdb, 0x9b, 0x30, 0xa9, 0x18, 0x21, 0x1b, 0x13, 0x34, 0x0f, 0xc3, 0x4c, 0x7c, 0x12, 0xda, 0xb5,
+	0x32, 0xed, 0x02, 0x13, 0xad, 0x30, 0x2f, 0x67, 0x5d, 0xf0, 0xde, 0x21, 0x4b, 0xfb, 0x31, 0xe1,
+	0x77, 0xfa, 0xa2, 0xd6, 0x05, 0x09, 0xc0, 0x09, 0x8e, 0xfd, 0x7f, 0xb9, 0x18, 0x9a, 0x70, 0xdb,
+	0x01, 0xce, 0x97, 0x67, 0xa1, 0xb4, 0x13, 0x44, 0x31, 0xc5, 0x66, 0x6d, 0x0c, 0x27, 0x82, 0xe7,
+	0x75, 0x51, 0x8e, 0x15, 0x06, 0x7a, 0x0d, 0x26, 0x1a, 0x7a, 0x03, 0xe2, 0x70, 0x54, 0x6c, 0xc4,
+	0x68, 0x1d, 0x9b, 0xb8, 0xe8, 0x55, 0x28, 0x31, 0x1b, 0x90, 0x46, 0xd0, 0x14, 0x52, 0x9b, 0x3c,
+	0xe1, 0x4b, 0x35, 0x51, 0x7e, 0xa8, 0xfd, 0xc6, 0x0a, 0x1b, 0x5d, 0x86, 0x11, 0xda, 0x85, 0x6a,
+	0x4d, 0x1c, 0x4b, 0x4a, 0x51, 0x74, 0x9d, 0x95, 0x62, 0x01, 0xb5, 0xff, 0x7a, 0x41, 0x1b, 0x65,
+	0x7a, 0x1f, 0x26, 0xa8, 0x06, 0xa3, 0xf7, 0x1c, 0x2f, 0xf6, 0xfc, 0x6d, 0x21, 0x7f, 0x3c, 0xdd,
+	0xf3, 0x8c, 0x62, 0x95, 0xee, 0xf2, 0x0a, 0xfc, 0x14, 0x15, 0x7f, 0xb0, 0x24, 0x43, 0x29, 0x86,
+	0x1d, 0xdf, 0xa7, 0x14, 0x0b, 0x83, 0x52, 0xc4, 0xbc, 0x02, 0xa7, 0x28, 0xfe, 0x60, 0x49, 0x06,
+	0xbd, 0x05, 0x20, 0x77, 0x18, 0x71, 0x85, 0xed, 0xc5, 0xb3, 0xfd, 0x89, 0x6e, 0xa8, 0x3a, 0x4b,
+	0x93, 0xf4, 0x8c, 0x4e, 0xfe, 0x63, 0x8d, 0x9e, 0x1d, 0x33, 0x39, 0xad, 0xbb, 0x33, 0xe8, 0x9b,
+	0xe9, 0x12, 0x77, 0xc2, 0x98, 0xb8, 0x8b, 0xb1, 0x18, 0x9c, 0x0f, 0x0f, 0x76, 0x49, 0xd9, 0xf0,
+	0x5a, 0x44, 0xdf, 0x0e, 0x82, 0x08, 0x4e, 0xe8, 0xd9, 0x3f, 0x5f, 0x84, 0xd9, 0xbc, 0xee, 0xd2,
+	0x45, 0x47, 0xee, 0x7b, 0xf1, 0x32, 0x15, 0xaf, 0x2c, 0x73, 0xd1, 0xad, 0x88, 0x72, 0xac, 0x30,
+	0xe8, 0xec, 0x47, 0xde, 0xb6, 0xbc, 0x63, 0x0e, 0x27, 0xb3, 0x5f, 0x67, 0xa5, 0x58, 0x40, 0x29,
+	0x5e, 0x48, 0x9c, 0x48, 0x18, 0xf7, 0x68, 0xab, 0x04, 0xb3, 0x52, 0x2c, 0xa0, 0xba, 0xb6, 0x6b,
+	0xa8, 0x8f, 0xb6, 0xcb, 0x18, 0xa2, 0xe1, 0xe3, 0x1d, 0x22, 0xf4, 0x59, 0x80, 0x2d, 0xcf, 0xf7,
+	0xa2, 0x1d, 0x46, 0x7d, 0xe4, 0xc8, 0xd4, 0x95, 0x70, 0xb6, 0xaa, 0xa8, 0x60, 0x8d, 0x22, 0x7a,
+	0x19, 0xc6, 0xd4, 0x06, 0xac, 0x56, 0xd8, 0x4b, 0xa7, 0x66, 0x39, 0x92, 0x70, 0xa3, 0x0a, 0xd6,
+	0xf1, 0xec, 0xcf, 0xa7, 0xd7, 0x8b, 0xd8, 0x01, 0xda, 0xf8, 0x5a, 0x83, 0x8e, 0x6f, 0xa1, 0xf7,
+	0xf8, 0xda, 0x5f, 0x2d, 0xc2, 0x94, 0xd1, 0x58, 0x27, 0x1a, 0x80, 0x67, 0x5d, 0xa3, 0x0c, 0xdc,
+	0x89, 0x89, 0xd8, 0x7f, 0x76, 0xff, 0xad, 0xa2, 0x33, 0x79, 0xba, 0x03, 0x78, 0x7d, 0xf4, 0x59,
+	0x28, 0x37, 0x9d, 0x88, 0x69, 0xce, 0x88, 0xd8, 0x77, 0x83, 0x10, 0x4b, 0x2e, 0x26, 0x4e, 0x14,
+	0x6b, 0xa7, 0x26, 0xa7, 0x9d, 0x90, 0xa4, 0x27, 0x0d, 0x95, 0x4f, 0xa4, 0xf5, 0x98, 0xea, 0x04,
+	0x15, 0x62, 0xf6, 0x31, 0x87, 0xa1, 0x57, 0x61, 0x3c, 0x24, 0x6c, 0x55, 0x2c, 0x53, 0x69, 0x8e,
+	0x2d, 0xb3, 0xe1, 0x44, 0xec, 0xc3, 0x1a, 0x0c, 0x1b, 0x98, 0xc9, 0xdd, 0x60, 0xa4, 0xc7, 0xdd,
+	0xe0, 0x69, 0x18, 0x65, 0x3f, 0xd4, 0x0a, 0x50, 0xb3, 0x51, 0xe5, 0xc5, 0x58, 0xc2, 0xd3, 0x0b,
+	0xa6, 0x34, 0xd8, 0x82, 0xa1, 0xb7, 0x0f, 0xb1, 0xa8, 0xd9, 0x2b, 0x73, 0x89, 0x73, 0x39, 0xb1,
+	0xe4, 0xb1, 0x84, 0xd9, 0x1f, 0x86, 0xc9, 0x8a, 0x43, 0x5a, 0x81, 0xbf, 0xe2, 0xbb, 0xed, 0xc0,
+	0xf3, 0x63, 0x34, 0x0b, 0x43, 0xec, 0x10, 0xe1, 0x2c, 0x60, 0x88, 0x36, 0x84, 0x87, 0xe8, 0x85,
+	0xc0, 0xde, 0x86, 0x33, 0x95, 0xe0, 0x9e, 0x7f, 0xcf, 0x09, 0xdd, 0xc5, 0x5a, 0x55, 0xbb, 0x5f,
+	0xaf, 0xcb, 0xfb, 0x1d, 0x37, 0xda, 0xca, 0x64, 0xbd, 0x5a, 0x4d, 0x2e, 0xd6, 0xae, 0x7a, 0x4d,
+	0x92, 0xa3, 0x05, 0xf9, 0x9b, 0x05, 0xa3, 0xa5, 0x04, 0x5f, 0xbd, 0x6a, 0x59, 0xb9, 0xaf, 0x5a,
+	0x6f, 0x42, 0x69, 0xcb, 0x23, 0x4d, 0x17, 0x93, 0x2d, 0xb1, 0x12, 0x9f, 0xca, 0xb7, 0x43, 0x59,
+	0xa5, 0x98, 0x52, 0xeb, 0xc5, 0x6f, 0x87, 0xab, 0xa2, 0x32, 0x56, 0x64, 0xd0, 0x2e, 0x4c, 0xcb,
+	0x0b, 0x83, 0x84, 0x8a, 0x75, 0xf9, 0x74, 0xaf, 0x5b, 0x88, 0x49, 0xfc, 0xf4, 0x83, 0x83, 0xf9,
+	0x69, 0x9c, 0x22, 0x83, 0xbb, 0x08, 0xd3, 0xeb, 0x60, 0x8b, 0x72, 0xe0, 0x21, 0x36, 0xfc, 0xec,
+	0x3a, 0xc8, 0x6e, 0xb6, 0xac, 0xd4, 0xfe, 0x61, 0x0b, 0x1e, 0xeb, 0x1a, 0x19, 0x71, 0xc3, 0x3f,
+	0xe6, 0x59, 0x48, 0xdf, 0xb8, 0x0b, 0xfd, 0x6f, 0xdc, 0xf6, 0x3f, 0xb0, 0xe0, 0xf4, 0x4a, 0xab,
+	0x1d, 0xef, 0x57, 0x3c, 0xf3, 0x09, 0xea, 0x15, 0x18, 0x69, 0x11, 0xd7, 0xeb, 0xb4, 0xc4, 0xcc,
+	0xcd, 0x4b, 0x2e, 0xb5, 0xc6, 0x4a, 0x0f, 0x0f, 0xe6, 0x27, 0xea, 0x71, 0x10, 0x3a, 0xdb, 0x84,
+	0x17, 0x60, 0x81, 0xce, 0x78, 0xbd, 0xf7, 0x0e, 0xb9, 0xe9, 0xb5, 0x3c, 0x69, 0x57, 0xd4, 0x53,
+	0x67, 0xb7, 0x20, 0x07, 0x74, 0xe1, 0xcd, 0x8e, 0xe3, 0xc7, 0x5e, 0xbc, 0x2f, 0x5e, 0x8f, 0x24,
+	0x11, 0x9c, 0xd0, 0xb3, 0xbf, 0x62, 0xc1, 0x94, 0x5c, 0xf7, 0x8b, 0xae, 0x1b, 0x92, 0x28, 0x42,
+	0x73, 0x50, 0xf0, 0xda, 0xa2, 0x97, 0x20, 0x7a, 0x59, 0xa8, 0xd6, 0x70, 0xc1, 0x6b, 0x4b, 0xb1,
+	0x8c, 0x31, 0xc2, 0xa2, 0xf9, 0x90, 0x76, 0x5d, 0x94, 0x63, 0x85, 0x81, 0xae, 0x40, 0xc9, 0x0f,
+	0x5c, 0x6e, 0xdb, 0xc5, 0x8f, 0x34, 0xb6, 0xc0, 0xd6, 0x45, 0x19, 0x56, 0x50, 0x54, 0x83, 0x32,
+	0x37, 0x7b, 0x4a, 0x16, 0xed, 0x40, 0xc6, 0x53, 0xec, 0xcb, 0x36, 0x64, 0x4d, 0x9c, 0x10, 0xb1,
+	0x7f, 0xd9, 0x82, 0x71, 0xf9, 0x65, 0x03, 0xca, 0x9c, 0x74, 0x6b, 0x25, 0xf2, 0x66, 0xb2, 0xb5,
+	0xa8, 0xcc, 0xc8, 0x20, 0x86, 0xa8, 0x58, 0x3c, 0x92, 0xa8, 0xf8, 0x3c, 0x8c, 0x39, 0xed, 0x76,
+	0xcd, 0x94, 0x33, 0xd9, 0x52, 0x5a, 0x4c, 0x8a, 0xb1, 0x8e, 0x63, 0xff, 0x50, 0x01, 0x26, 0xe5,
+	0x17, 0xd4, 0x3b, 0x9b, 0x11, 0x89, 0xd1, 0x06, 0x94, 0x1d, 0x3e, 0x4b, 0x44, 0x2e, 0xf2, 0x27,
+	0xb3, 0xf5, 0x08, 0xc6, 0x94, 0x26, 0x07, 0xfe, 0xa2, 0xac, 0x8d, 0x13, 0x42, 0xa8, 0x09, 0x33,
+	0x7e, 0x10, 0x33, 0xe6, 0xaf, 0xe0, 0xbd, 0x9e, 0x76, 0xd2, 0xd4, 0xcf, 0x09, 0xea, 0x33, 0xeb,
+	0x69, 0x2a, 0xb8, 0x9b, 0x30, 0x5a, 0x91, 0xba, 0x99, 0x62, 0xbe, 0x32, 0x40, 0x9f, 0xb8, 0x6c,
+	0xd5, 0x8c, 0xfd, 0x4b, 0x16, 0x94, 0x25, 0xda, 0x49, 0xbc, 0xe2, 0xad, 0xc1, 0x68, 0xc4, 0x26,
+	0x41, 0x0e, 0x8d, 0xdd, 0xab, 0xe3, 0x7c, 0xbe, 0x92, 0x33, 0x8d, 0xff, 0x8f, 0xb0, 0xa4, 0xc1,
+	0x54, 0xf3, 0xaa, 0xfb, 0xef, 0x11, 0xd5, 0xbc, 0xea, 0x4f, 0xce, 0xa1, 0xf4, 0x07, 0xac, 0xcf,
+	0x9a, 0xae, 0x8b, 0x8a, 0x5e, 0xed, 0x90, 0x6c, 0x79, 0xf7, 0xd3, 0xa2, 0x57, 0x8d, 0x95, 0x62,
+	0x01, 0x45, 0x6f, 0xc1, 0x78, 0x43, 0xea, 0x64, 0x93, 0x1d, 0x7e, 0xb9, 0xe7, 0xfb, 0x80, 0x7a,
+	0x4a, 0xe2, 0xba, 0x90, 0x65, 0xad, 0x3e, 0x36, 0xa8, 0x99, 0x66, 0x04, 0xc5, 0x7e, 0x66, 0x04,
+	0x09, 0xdd, 0xfc, 0x47, 0xf5, 0x1f, 0xb1, 0x60, 0x84, 0xeb, 0xe2, 0x06, 0x53, 0x85, 0x6a, 0x2f,
+	0x6b, 0xc9, 0xd8, 0xdd, 0xa1, 0x85, 0xe2, 0xa5, 0x0c, 0xad, 0x41, 0x99, 0xfd, 0x60, 0xba, 0xc4,
+	0x62, 0xbe, 0xd5, 0x3d, 0x6f, 0x55, 0xef, 0xe0, 0x1d, 0x59, 0x0d, 0x27, 0x14, 0xec, 0x1f, 0x28,
+	0x52, 0xee, 0x96, 0xa0, 0x1a, 0x87, 0xbe, 0xf5, 0xe8, 0x0e, 0xfd, 0xc2, 0xa3, 0x3a, 0xf4, 0xb7,
+	0x61, 0xaa, 0xa1, 0xbd, 0xc3, 0x25, 0x33, 0x79, 0xa5, 0xe7, 0x22, 0xd1, 0x9e, 0xec, 0xb8, 0x96,
+	0x65, 0xd9, 0x24, 0x82, 0xd3, 0x54, 0xd1, 0x37, 0xc3, 0x38, 0x9f, 0x67, 0xd1, 0x0a, 0xb7, 0xc4,
+	0xf8, 0x50, 0xfe, 0x7a, 0xd1, 0x9b, 0xe0, 0x5a, 0x39, 0xad, 0x3a, 0x36, 0x88, 0xd9, 0x7f, 0x6c,
+	0x01, 0x5a, 0x69, 0xef, 0x90, 0x16, 0x09, 0x9d, 0x66, 0xa2, 0x4e, 0xff, 0x2b, 0x16, 0xcc, 0x92,
+	0xae, 0xe2, 0xe5, 0xa0, 0xd5, 0x12, 0x97, 0x96, 0x9c, 0x7b, 0xf5, 0x4a, 0x4e, 0x1d, 0xe5, 0x96,
+	0x30, 0x9b, 0x87, 0x81, 0x73, 0xdb, 0x43, 0x6b, 0x70, 0x8a, 0x9f, 0x92, 0x0a, 0xa0, 0xd9, 0x5e,
+	0x3f, 0x2e, 0x08, 0x9f, 0xda, 0xe8, 0x46, 0xc1, 0x59, 0xf5, 0xec, 0xef, 0x18, 0x87, 0xdc, 0x5e,
+	0xbc, 0xff, 0x8e, 0xf0, 0xfe, 0x3b, 0xc2, 0xfb, 0xef, 0x08, 0xef, 0xbf, 0x23, 0xbc, 0xff, 0x8e,
+	0xf0, 0x75, 0xff, 0x8e, 0xf0, 0x87, 0x16, 0x9c, 0xea, 0x3e, 0x06, 0x4e, 0x42, 0x30, 0xef, 0xc0,
+	0xa9, 0xee, 0xb3, 0xae, 0xa7, 0x9d, 0x5d, 0x77, 0x3f, 0x93, 0x73, 0x2f, 0xe3, 0x1b, 0x70, 0x16,
+	0x7d, 0xfb, 0xe7, 0x4b, 0x30, 0xbc, 0xb2, 0x47, 0xfc, 0xf8, 0x04, 0x3e, 0xb1, 0x01, 0x93, 0x9e,
+	0xbf, 0x17, 0x34, 0xf7, 0x88, 0xcb, 0xe1, 0x47, 0xb9, 0x22, 0x9f, 0x15, 0xa4, 0x27, 0xab, 0x06,
+	0x09, 0x9c, 0x22, 0xf9, 0x28, 0xd4, 0xd4, 0xd7, 0x60, 0x84, 0x9f, 0x0e, 0x42, 0x47, 0x9d, 0x79,
+	0x18, 0xb0, 0x41, 0x14, 0x67, 0x5e, 0xa2, 0x42, 0xe7, 0xa7, 0x8f, 0xa8, 0x8e, 0x3e, 0x0f, 0x93,
+	0x5b, 0x5e, 0x18, 0xc5, 0x1b, 0x5e, 0x8b, 0x44, 0xb1, 0xd3, 0x6a, 0x3f, 0x84, 0x5a, 0x5a, 0x8d,
+	0xc3, 0xaa, 0x41, 0x09, 0xa7, 0x28, 0xa3, 0x6d, 0x98, 0x68, 0x3a, 0x7a, 0x53, 0xa3, 0x47, 0x6e,
+	0x4a, 0x1d, 0x3b, 0x37, 0x75, 0x42, 0xd8, 0xa4, 0x4b, 0xf7, 0x69, 0x83, 0x69, 0x56, 0x4b, 0x4c,
+	0xdf, 0xa0, 0xf6, 0x29, 0x57, 0xa9, 0x72, 0x18, 0x95, 0xa0, 0x98, 0xe5, 0x6d, 0xd9, 0x94, 0xa0,
+	0x34, 0xfb, 0xda, 0xcf, 0x41, 0x99, 0xd0, 0x21, 0xa4, 0x84, 0xc5, 0xc9, 0x75, 0x75, 0xb0, 0xbe,
+	0xae, 0x79, 0x8d, 0x30, 0x30, 0x1f, 0x04, 0x56, 0x24, 0x25, 0x9c, 0x10, 0x45, 0xcb, 0x30, 0x12,
+	0x91, 0xd0, 0x23, 0x91, 0x38, 0xc3, 0x7a, 0x4c, 0x23, 0x43, 0xe3, 0x4e, 0x2b, 0xfc, 0x37, 0x16,
+	0x55, 0xe9, 0xf2, 0x72, 0x98, 0xae, 0x94, 0x9d, 0x32, 0xda, 0xf2, 0x5a, 0x64, 0xa5, 0x58, 0x40,
+	0xd1, 0x1b, 0x30, 0x1a, 0x92, 0x26, 0x7b, 0x71, 0x9a, 0x18, 0x7c, 0x91, 0xf3, 0x07, 0x2c, 0x5e,
+	0x0f, 0x4b, 0x02, 0xe8, 0x06, 0xa0, 0x90, 0x50, 0x09, 0xcc, 0xf3, 0xb7, 0x95, 0x3d, 0xaa, 0xe0,
+	0xe0, 0x6a, 0xc7, 0xe3, 0x04, 0x43, 0xfa, 0x0f, 0xe1, 0x8c, 0x6a, 0xe8, 0x1a, 0xcc, 0xa8, 0xd2,
+	0xaa, 0x1f, 0xc5, 0x0e, 0xe5, 0x9c, 0x53, 0x8c, 0x96, 0x52, 0x80, 0xe0, 0x34, 0x02, 0xee, 0xae,
+	0x63, 0xff, 0xa4, 0x05, 0x7c, 0x9c, 0x4f, 0xe0, 0xda, 0xff, 0xba, 0x79, 0xed, 0x3f, 0x97, 0x3b,
+	0x73, 0x39, 0x57, 0xfe, 0x07, 0x16, 0x8c, 0x69, 0x33, 0x9b, 0xac, 0x59, 0xab, 0xc7, 0x9a, 0xed,
+	0xc0, 0x34, 0x5d, 0xe9, 0xb7, 0x36, 0x23, 0x12, 0xee, 0x11, 0x97, 0x2d, 0xcc, 0xc2, 0xc3, 0x2d,
+	0x4c, 0x65, 0xfb, 0x76, 0x33, 0x45, 0x10, 0x77, 0x35, 0x81, 0x5e, 0x91, 0xcf, 0x2f, 0x45, 0xc3,
+	0xce, 0x9c, 0x3f, 0xad, 0x1c, 0x1e, 0xcc, 0x4f, 0x6b, 0x1f, 0xa2, 0x3f, 0xb7, 0xd8, 0x9f, 0x93,
+	0xdf, 0xa8, 0x6c, 0x0c, 0x1b, 0x6a, 0xb1, 0xa4, 0x6c, 0x0c, 0xd5, 0x72, 0xc0, 0x09, 0x0e, 0xdd,
+	0xa3, 0x3b, 0x41, 0x14, 0xa7, 0x6d, 0x0c, 0xaf, 0x07, 0x51, 0x8c, 0x19, 0xc4, 0x7e, 0x11, 0x60,
+	0xe5, 0x3e, 0x69, 0xf0, 0xa5, 0xae, 0x5f, 0x67, 0xac, 0xfc, 0xeb, 0x8c, 0xfd, 0x5b, 0x16, 0x4c,
+	0xae, 0x2e, 0x1b, 0x4a, 0xe4, 0x05, 0x00, 0x7e, 0x07, 0xbb, 0x7b, 0x77, 0x5d, 0x3e, 0xd0, 0xf3,
+	0x37, 0x56, 0x55, 0x8a, 0x35, 0x0c, 0x74, 0x0e, 0x8a, 0xcd, 0x8e, 0x2f, 0x14, 0x9a, 0xa3, 0xf4,
+	0xc0, 0xbe, 0xd9, 0xf1, 0x31, 0x2d, 0xd3, 0x9c, 0x1c, 0x8a, 0x03, 0x3b, 0x39, 0xf4, 0x0d, 0x36,
+	0x80, 0xe6, 0x61, 0xf8, 0xde, 0x3d, 0xcf, 0xe5, 0x2e, 0x9d, 0xc2, 0x78, 0xe0, 0xee, 0xdd, 0x6a,
+	0x25, 0xc2, 0xbc, 0xdc, 0xfe, 0x52, 0x11, 0xe6, 0x56, 0x9b, 0xe4, 0xfe, 0xbb, 0x74, 0x6b, 0x1d,
+	0xd4, 0x45, 0xe3, 0x68, 0xaa, 0xa1, 0xa3, 0xba, 0xe1, 0xf4, 0x1f, 0x8f, 0x2d, 0x18, 0xe5, 0x26,
+	0x76, 0xd2, 0xc9, 0xf5, 0xb5, 0xac, 0xd6, 0xf3, 0x07, 0x64, 0x81, 0x9b, 0xea, 0x09, 0x1f, 0x3d,
+	0x75, 0xd2, 0x8a, 0x52, 0x2c, 0x89, 0xcf, 0x7d, 0x0c, 0xc6, 0x75, 0xcc, 0x23, 0x39, 0xc4, 0xfd,
+	0xe5, 0x22, 0x4c, 0xd3, 0x1e, 0x3c, 0xd2, 0x89, 0xb8, 0xdd, 0x3d, 0x11, 0xc7, 0xed, 0x14, 0xd5,
+	0x7f, 0x36, 0xde, 0x4a, 0xcf, 0xc6, 0xf3, 0x79, 0xb3, 0x71, 0xd2, 0x73, 0xf0, 0xed, 0x16, 0x9c,
+	0x5a, 0x6d, 0x06, 0x8d, 0xdd, 0x94, 0xe3, 0xd2, 0xcb, 0x30, 0x46, 0xf9, 0x78, 0x64, 0xf8, 0xd4,
+	0x1b, 0x51, 0x16, 0x04, 0x08, 0xeb, 0x78, 0x5a, 0xb5, 0xdb, 0xb7, 0xab, 0x95, 0xac, 0xe0, 0x0c,
+	0x02, 0x84, 0x75, 0x3c, 0xfb, 0x37, 0x2c, 0xb8, 0x70, 0x6d, 0x79, 0x25, 0x59, 0x8a, 0x5d, 0xf1,
+	0x21, 0x2e, 0xc3, 0x48, 0xdb, 0xd5, 0xba, 0x92, 0x28, 0x7c, 0x2b, 0xac, 0x17, 0x02, 0xfa, 0x5e,
+	0x89, 0x7d, 0xf2, 0x13, 0x16, 0x9c, 0xba, 0xe6, 0xc5, 0xf4, 0x58, 0x4e, 0x47, 0x2a, 0xa0, 0xe7,
+	0x72, 0xe4, 0xc5, 0x41, 0xb8, 0x9f, 0x8e, 0x54, 0x80, 0x15, 0x04, 0x6b, 0x58, 0xbc, 0xe5, 0x3d,
+	0x8f, 0x19, 0x77, 0x17, 0xcc, 0xa7, 0x2f, 0x2c, 0xca, 0xb1, 0xc2, 0xa0, 0x1f, 0xe6, 0x7a, 0x21,
+	0xd3, 0x1a, 0xee, 0x0b, 0x0e, 0xab, 0x3e, 0xac, 0x22, 0x01, 0x38, 0xc1, 0xa1, 0x17, 0xa8, 0xf9,
+	0x6b, 0xcd, 0x4e, 0x14, 0x93, 0x70, 0x2b, 0xca, 0xe1, 0x8e, 0x2f, 0x42, 0x99, 0x48, 0x1d, 0xbd,
+	0xe8, 0xb5, 0x12, 0x35, 0x95, 0xf2, 0x9e, 0x07, 0x4c, 0x50, 0x78, 0x03, 0xb8, 0x41, 0x1e, 0xcd,
+	0x8f, 0x6d, 0x15, 0x10, 0xd1, 0xdb, 0xd2, 0x23, 0x48, 0x30, 0x57, 0xf4, 0x95, 0x2e, 0x28, 0xce,
+	0xa8, 0x61, 0xff, 0xb0, 0x05, 0x67, 0xd4, 0x07, 0xbf, 0xe7, 0x3e, 0xd3, 0xfe, 0x99, 0x02, 0x4c,
+	0x5c, 0xdf, 0xd8, 0xa8, 0x5d, 0x23, 0xb1, 0x38, 0xb6, 0xfb, 0xbf, 0xbc, 0x63, 0xed, 0x01, 0xb1,
+	0xd7, 0x2d, 0xb0, 0x13, 0x7b, 0xcd, 0x05, 0x1e, 0x88, 0x68, 0xa1, 0xea, 0xc7, 0xb7, 0xc2, 0x7a,
+	0x1c, 0x7a, 0xfe, 0x76, 0xe6, 0x93, 0xa3, 0x14, 0x2e, 0x8a, 0x79, 0xc2, 0x05, 0x7a, 0x11, 0x46,
+	0x58, 0x24, 0x24, 0x39, 0x09, 0x8f, 0xab, 0x4b, 0x14, 0x2b, 0x3d, 0x3c, 0x98, 0x2f, 0xdf, 0xc6,
+	0x55, 0xfe, 0x07, 0x0b, 0x54, 0x74, 0x1b, 0xc6, 0x76, 0xe2, 0xb8, 0x7d, 0x9d, 0x38, 0x2e, 0xbd,
+	0x2d, 0x73, 0x76, 0x78, 0x31, 0x8b, 0x1d, 0xd2, 0x41, 0xe0, 0x68, 0x09, 0x07, 0x49, 0xca, 0x22,
+	0xac, 0xd3, 0xb1, 0xeb, 0x00, 0x09, 0xec, 0x98, 0xde, 0x4e, 0xec, 0xdf, 0xb7, 0x60, 0x94, 0x07,
+	0xa5, 0x08, 0xd1, 0xc7, 0x61, 0x88, 0xdc, 0x27, 0x0d, 0x21, 0x2a, 0x67, 0x76, 0x38, 0x91, 0xb4,
+	0xb8, 0x0e, 0x98, 0xfe, 0xc7, 0xac, 0x16, 0xba, 0x0e, 0xa3, 0xb4, 0xb7, 0xd7, 0x54, 0x84, 0x8e,
+	0x27, 0xf2, 0xbe, 0x58, 0x4d, 0x3b, 0x17, 0xce, 0x44, 0x11, 0x96, 0xd5, 0xd9, 0x83, 0x75, 0xa3,
+	0x5d, 0xa7, 0x1c, 0x3b, 0xee, 0x25, 0x58, 0x6c, 0x2c, 0xd7, 0x38, 0x92, 0xa0, 0xc6, 0x1f, 0xac,
+	0x65, 0x21, 0x4e, 0x88, 0xd8, 0x1b, 0x50, 0xa6, 0x93, 0xba, 0xd8, 0xf4, 0x9c, 0xde, 0x6f, 0xf0,
+	0xcf, 0x40, 0x59, 0xbe, 0xb0, 0x47, 0xc2, 0x19, 0x9d, 0x51, 0x95, 0x0f, 0xf0, 0x11, 0x4e, 0xe0,
+	0xf6, 0x16, 0x9c, 0x66, 0xf6, 0x92, 0x4e, 0xbc, 0x63, 0xec, 0xb1, 0xfe, 0x8b, 0xf9, 0x59, 0x71,
+	0xf3, 0xe4, 0x33, 0x33, 0xab, 0xf9, 0x7b, 0x8e, 0x4b, 0x8a, 0xc9, 0x2d, 0xd4, 0xfe, 0xea, 0x10,
+	0x3c, 0x5e, 0xad, 0xe7, 0xc7, 0x2b, 0x79, 0x15, 0xc6, 0xb9, 0x5c, 0x4a, 0x97, 0xb6, 0xd3, 0x14,
+	0xed, 0x2a, 0xe5, 0xef, 0x86, 0x06, 0xc3, 0x06, 0x26, 0xba, 0x00, 0x45, 0xef, 0x6d, 0x3f, 0xed,
+	0x0d, 0x55, 0x7d, 0x73, 0x1d, 0xd3, 0x72, 0x0a, 0xa6, 0x22, 0x2e, 0x3f, 0x3b, 0x14, 0x58, 0x89,
+	0xb9, 0xaf, 0xc3, 0xa4, 0x17, 0x35, 0x22, 0xaf, 0xea, 0x53, 0x3e, 0xa3, 0x71, 0x2a, 0xa5, 0x15,
+	0xa1, 0x9d, 0x56, 0x50, 0x9c, 0xc2, 0xd6, 0x0e, 0xb2, 0xe1, 0x81, 0xc5, 0xe4, 0xbe, 0xde, 0xd9,
+	0xf4, 0x06, 0xd0, 0x66, 0x5f, 0x17, 0x31, 0x2d, 0xbe, 0xb8, 0x01, 0xf0, 0x0f, 0x8e, 0xb0, 0x84,
+	0xd1, 0x2b, 0x67, 0x63, 0xc7, 0x69, 0x2f, 0x76, 0xe2, 0x9d, 0x8a, 0x17, 0x35, 0x82, 0x3d, 0x12,
+	0xee, 0x33, 0x6d, 0x41, 0x29, 0xb9, 0x72, 0x2a, 0xc0, 0xf2, 0xf5, 0xc5, 0x1a, 0xc5, 0xc4, 0xdd,
+	0x75, 0xd0, 0x22, 0x4c, 0xc9, 0xc2, 0x3a, 0x89, 0xd8, 0x11, 0x36, 0xc6, 0xc8, 0x28, 0xff, 0x24,
+	0x51, 0xac, 0x88, 0xa4, 0xf1, 0x4d, 0x49, 0x1a, 0x8e, 0x43, 0x92, 0x7e, 0x05, 0x26, 0x3c, 0xdf,
+	0x8b, 0x3d, 0x27, 0x0e, 0xf8, 0x13, 0x14, 0x57, 0x0c, 0x30, 0xdd, 0x7a, 0x55, 0x07, 0x60, 0x13,
+	0xcf, 0xfe, 0x6f, 0x43, 0x30, 0xc3, 0xa6, 0xed, 0xfd, 0x15, 0xf6, 0xf5, 0xb4, 0xc2, 0x6e, 0x77,
+	0xaf, 0xb0, 0xe3, 0xb8, 0x22, 0x3c, 0xf4, 0x32, 0xfb, 0x3c, 0x94, 0x95, 0x4b, 0x96, 0xf4, 0xc9,
+	0xb4, 0x72, 0x7c, 0x32, 0xfb, 0x4b, 0x1f, 0xd2, 0xaa, 0xad, 0x98, 0x69, 0xd5, 0xf6, 0xb7, 0x2d,
+	0x48, 0xde, 0x54, 0xd0, 0x75, 0x28, 0xb7, 0x03, 0x66, 0xac, 0x19, 0x4a, 0x0b, 0xe8, 0xc7, 0x33,
+	0x0f, 0x2a, 0x7e, 0x28, 0xf2, 0x8f, 0xaf, 0xc9, 0x1a, 0x38, 0xa9, 0x8c, 0x96, 0x60, 0xb4, 0x1d,
+	0x92, 0x7a, 0xcc, 0xc2, 0x96, 0xf4, 0xa5, 0xc3, 0xd7, 0x08, 0xc7, 0xc7, 0xb2, 0xa2, 0xfd, 0xb3,
+	0x16, 0x00, 0x37, 0x1c, 0x73, 0xfc, 0x6d, 0x72, 0x02, 0xea, 0xee, 0x0a, 0x0c, 0x45, 0x6d, 0xd2,
+	0xe8, 0x65, 0x46, 0x9b, 0xf4, 0xa7, 0xde, 0x26, 0x8d, 0x64, 0xc0, 0xe9, 0x3f, 0xcc, 0x6a, 0xdb,
+	0xdf, 0x09, 0x30, 0x99, 0xa0, 0x55, 0x63, 0xd2, 0x42, 0xcf, 0x19, 0x61, 0x0c, 0xce, 0xa5, 0xc2,
+	0x18, 0x94, 0x19, 0xb6, 0xa6, 0x59, 0xfd, 0x3c, 0x14, 0x5b, 0xce, 0x7d, 0xa1, 0x3a, 0x7b, 0xa6,
+	0x77, 0x37, 0x28, 0xfd, 0x85, 0x35, 0xe7, 0x3e, 0xbf, 0x24, 0x3e, 0x23, 0x17, 0xc8, 0x9a, 0x73,
+	0xff, 0x90, 0x1b, 0xcb, 0x32, 0x26, 0x75, 0xd3, 0x8b, 0xe2, 0x2f, 0xfc, 0xd7, 0xe4, 0x3f, 0x5b,
+	0x76, 0xb4, 0x11, 0xd6, 0x96, 0xe7, 0x0b, 0x9b, 0xa8, 0x81, 0xda, 0xf2, 0xfc, 0x74, 0x5b, 0x9e,
+	0x3f, 0x40, 0x5b, 0x9e, 0x8f, 0xde, 0x81, 0x51, 0x61, 0xb2, 0x28, 0xc2, 0x06, 0x5d, 0x1d, 0xa0,
+	0x3d, 0x61, 0xf1, 0xc8, 0xdb, 0xbc, 0x2a, 0x2f, 0xc1, 0xa2, 0xb4, 0x6f, 0xbb, 0xb2, 0x41, 0xf4,
+	0x37, 0x2c, 0x98, 0x14, 0xbf, 0x31, 0x79, 0xbb, 0x43, 0xa2, 0x58, 0xc8, 0x9e, 0x1f, 0x1d, 0xbc,
+	0x0f, 0xa2, 0x22, 0xef, 0xca, 0x47, 0x25, 0x9b, 0x35, 0x81, 0x7d, 0x7b, 0x94, 0xea, 0x05, 0xfa,
+	0x47, 0x16, 0x9c, 0x6e, 0x39, 0xf7, 0x79, 0x8b, 0xbc, 0x0c, 0x3b, 0xb1, 0x17, 0x88, 0xa7, 0xff,
+	0x8f, 0x0f, 0x36, 0xfd, 0x5d, 0xd5, 0x79, 0x27, 0xe5, 0xfb, 0xe4, 0xe9, 0x2c, 0x94, 0xbe, 0x5d,
+	0xcd, 0xec, 0xd7, 0xdc, 0x16, 0x94, 0xe4, 0x7a, 0xcb, 0x50, 0x35, 0x54, 0x74, 0xc1, 0xfa, 0xc8,
+	0x16, 0xa3, 0x7a, 0x78, 0x00, 0xda, 0x8e, 0x58, 0x6b, 0x8f, 0xb4, 0x9d, 0xcf, 0xc3, 0xb8, 0xbe,
+	0xc6, 0x1e, 0x69, 0x5b, 0x6f, 0xc3, 0xa9, 0x8c, 0xb5, 0xf4, 0x48, 0x9b, 0xbc, 0x07, 0xe7, 0x72,
+	0xd7, 0xc7, 0xa3, 0x6c, 0xd8, 0xfe, 0x19, 0x4b, 0xe7, 0x83, 0x27, 0xf0, 0xe6, 0xb0, 0x6c, 0xbe,
+	0x39, 0x5c, 0xec, 0xbd, 0x73, 0x72, 0x1e, 0x1e, 0xde, 0xd2, 0x3b, 0x4d, 0xb9, 0x3a, 0x7a, 0x03,
+	0x46, 0x9a, 0xb4, 0x44, 0x1a, 0xbe, 0xda, 0xfd, 0x77, 0x64, 0x22, 0x4b, 0xb1, 0xf2, 0x08, 0x0b,
+	0x0a, 0xf6, 0x2f, 0x58, 0x30, 0x74, 0x02, 0x23, 0x81, 0xcd, 0x91, 0x78, 0x2e, 0x97, 0xb4, 0x88,
+	0x68, 0xbc, 0x80, 0x9d, 0x7b, 0x2b, 0xf7, 0x63, 0xe2, 0x47, 0xec, 0xaa, 0x98, 0x39, 0x30, 0xff,
+	0x1f, 0x9c, 0xba, 0x19, 0x38, 0xee, 0x92, 0xd3, 0x74, 0xfc, 0x06, 0x09, 0xab, 0xfe, 0xf6, 0x91,
+	0x8c, 0xb6, 0x0b, 0xfd, 0x8c, 0xb6, 0xed, 0x1d, 0x40, 0x7a, 0x03, 0xc2, 0xfb, 0x05, 0xc3, 0xa8,
+	0xc7, 0x9b, 0x12, 0xc3, 0xff, 0x54, 0xb6, 0x68, 0xd6, 0xd5, 0x33, 0xcd, 0xaf, 0x83, 0x17, 0x60,
+	0x49, 0xc8, 0x7e, 0x15, 0x32, 0x5d, 0xe8, 0xfb, 0xab, 0x0d, 0xec, 0x4f, 0xc1, 0x0c, 0xab, 0x79,
+	0xc4, 0x2b, 0xad, 0x9d, 0xd2, 0x4a, 0x66, 0x04, 0xd7, 0xb3, 0xbf, 0x68, 0xc1, 0xd4, 0x7a, 0x2a,
+	0xe6, 0xd8, 0x65, 0xf6, 0x00, 0x9a, 0xa1, 0x0c, 0xaf, 0xb3, 0x52, 0x2c, 0xa0, 0xc7, 0xae, 0x83,
+	0xfa, 0x73, 0x0b, 0x92, 0xa8, 0x16, 0x27, 0x20, 0x78, 0x2d, 0x1b, 0x82, 0x57, 0xa6, 0x6e, 0x44,
+	0x75, 0x27, 0x4f, 0xee, 0x42, 0x37, 0x54, 0xbc, 0xa7, 0x1e, 0x6a, 0x91, 0x84, 0x0c, 0x8f, 0x0e,
+	0x34, 0x69, 0x06, 0x85, 0x92, 0x11, 0xa0, 0xec, 0xff, 0x5c, 0x00, 0xa4, 0x70, 0x07, 0x8e, 0x47,
+	0xd5, 0x5d, 0xe3, 0x78, 0xe2, 0x51, 0xed, 0x01, 0x62, 0x4f, 0xf8, 0xa1, 0xe3, 0x47, 0x9c, 0xac,
+	0x27, 0xb4, 0x6e, 0x47, 0xb3, 0x0f, 0x98, 0x13, 0x4d, 0xa2, 0x9b, 0x5d, 0xd4, 0x70, 0x46, 0x0b,
+	0x9a, 0x69, 0xc6, 0xf0, 0xa0, 0xa6, 0x19, 0x23, 0x7d, 0x3c, 0xdc, 0x7e, 0xda, 0x82, 0x09, 0x35,
+	0x4c, 0xef, 0x11, 0xfb, 0x73, 0xd5, 0x9f, 0x1c, 0xd6, 0x57, 0xd3, 0xba, 0xcc, 0x8e, 0x84, 0x6f,
+	0x64, 0x9e, 0x8a, 0x4e, 0xd3, 0x7b, 0x87, 0xa8, 0x68, 0x80, 0xf3, 0xc2, 0xf3, 0x50, 0x94, 0x1e,
+	0x1e, 0xcc, 0x4f, 0xa8, 0x7f, 0x3c, 0xfa, 0x70, 0x52, 0xc5, 0xfe, 0x31, 0xba, 0xd9, 0xcd, 0xa5,
+	0x88, 0x5e, 0x86, 0xe1, 0xf6, 0x8e, 0x13, 0x91, 0x94, 0x9f, 0xce, 0x70, 0x8d, 0x16, 0x1e, 0x1e,
+	0xcc, 0x4f, 0xaa, 0x0a, 0xac, 0x04, 0x73, 0xec, 0xc1, 0xa3, 0x7c, 0x75, 0x2f, 0xce, 0xbe, 0x51,
+	0xbe, 0xfe, 0xd8, 0x82, 0xa1, 0xf5, 0xc0, 0x3d, 0x09, 0x16, 0xf0, 0xba, 0xc1, 0x02, 0xce, 0xe7,
+	0x05, 0x86, 0xcf, 0xdd, 0xfd, 0xab, 0xa9, 0xdd, 0x7f, 0x31, 0x97, 0x42, 0xef, 0x8d, 0xdf, 0x82,
+	0x31, 0x16, 0x6e, 0x5e, 0xf8, 0x24, 0xbd, 0x68, 0x6c, 0xf8, 0xf9, 0xd4, 0x86, 0x9f, 0xd2, 0x50,
+	0xb5, 0x9d, 0xfe, 0x34, 0x8c, 0x0a, 0x27, 0x97, 0xb4, 0xc3, 0xa7, 0xc0, 0xc5, 0x12, 0x6e, 0xff,
+	0x48, 0x11, 0x8c, 0xf0, 0xf6, 0xe8, 0x97, 0x2c, 0x58, 0x08, 0xb9, 0xf1, 0xab, 0x5b, 0xe9, 0x84,
+	0x9e, 0xbf, 0x5d, 0x6f, 0xec, 0x10, 0xb7, 0xd3, 0xf4, 0xfc, 0xed, 0xea, 0xb6, 0x1f, 0xa8, 0xe2,
+	0x95, 0xfb, 0xa4, 0xd1, 0x61, 0xcf, 0x57, 0x7d, 0x62, 0xe9, 0x2b, 0x23, 0xf2, 0x17, 0x1e, 0x1c,
+	0xcc, 0x2f, 0xe0, 0x23, 0xd1, 0xc6, 0x47, 0xec, 0x0b, 0xfa, 0x0d, 0x0b, 0xae, 0xf2, 0xa8, 0xef,
+	0x83, 0xf7, 0xbf, 0xc7, 0x3d, 0xb7, 0x26, 0x49, 0x25, 0x44, 0x36, 0x48, 0xd8, 0x5a, 0x7a, 0x45,
+	0x0c, 0xe8, 0xd5, 0xda, 0xd1, 0xda, 0xc2, 0x47, 0xed, 0x9c, 0xfd, 0x2f, 0x8b, 0x30, 0x21, 0xa2,
+	0x41, 0x89, 0x33, 0xe0, 0x65, 0x63, 0x49, 0x3c, 0x91, 0x5a, 0x12, 0x33, 0x06, 0xf2, 0xf1, 0xb0,
+	0xff, 0x08, 0x66, 0x28, 0x73, 0xbe, 0x4e, 0x9c, 0x30, 0xde, 0x24, 0x0e, 0xb7, 0xb8, 0x2a, 0x1e,
+	0x99, 0xfb, 0x2b, 0xc5, 0xda, 0xcd, 0x34, 0x31, 0xdc, 0x4d, 0xff, 0xeb, 0xe9, 0xcc, 0xf1, 0x61,
+	0xba, 0x2b, 0xa0, 0xd7, 0xa7, 0xa1, 0xac, 0x3c, 0x34, 0x04, 0xd3, 0xe9, 0x1d, 0x17, 0x2f, 0x4d,
+	0x81, 0x2b, 0xbf, 0x12, 0xef, 0xa0, 0x84, 0x9c, 0xfd, 0x8f, 0x0b, 0x46, 0x83, 0x7c, 0x12, 0xd7,
+	0xa1, 0xe4, 0x44, 0x91, 0xb7, 0xed, 0x13, 0x57, 0xec, 0xd8, 0x0f, 0xe6, 0xed, 0x58, 0xa3, 0x19,
+	0xe6, 0x25, 0xb3, 0x28, 0x6a, 0x62, 0x45, 0x03, 0x5d, 0xe7, 0x76, 0x6d, 0x7b, 0xf2, 0xa6, 0x36,
+	0x18, 0x35, 0x90, 0x96, 0x6f, 0x7b, 0x04, 0x8b, 0xfa, 0xe8, 0x33, 0xdc, 0xf0, 0xf0, 0x86, 0x1f,
+	0xdc, 0xf3, 0xaf, 0x05, 0x81, 0x8c, 0xb8, 0x30, 0x18, 0xc1, 0x19, 0x69, 0x6e, 0xa8, 0xaa, 0x63,
+	0x93, 0xda, 0x60, 0x11, 0x32, 0xbf, 0x05, 0x4e, 0x51, 0xd2, 0xa6, 0x43, 0x74, 0x84, 0x08, 0x4c,
+	0x89, 0x50, 0x63, 0xb2, 0x4c, 0x8c, 0x5d, 0xe6, 0x25, 0xcc, 0xac, 0x9d, 0x68, 0x80, 0x6f, 0x98,
+	0x24, 0x70, 0x9a, 0xa6, 0xfd, 0xe3, 0x16, 0x30, 0xe7, 0xd0, 0x13, 0x90, 0x47, 0x3e, 0x61, 0xca,
+	0x23, 0xb3, 0x79, 0x83, 0x9c, 0x23, 0x8a, 0xbc, 0xc4, 0x57, 0x56, 0x2d, 0x0c, 0xee, 0xef, 0x0b,
+	0xa3, 0x8f, 0xfe, 0xf7, 0x0f, 0xfb, 0xff, 0x58, 0x9c, 0x89, 0x29, 0xff, 0x09, 0xf4, 0xad, 0x50,
+	0x6a, 0x38, 0x6d, 0xa7, 0xc1, 0x73, 0xb1, 0xe4, 0xea, 0xe2, 0x8c, 0x4a, 0x0b, 0xcb, 0xa2, 0x06,
+	0xd7, 0x2d, 0xc9, 0x90, 0x75, 0x25, 0x59, 0xdc, 0x57, 0x9f, 0xa4, 0x9a, 0x9c, 0xdb, 0x85, 0x09,
+	0x83, 0xd8, 0x23, 0x55, 0x44, 0x7c, 0x2b, 0x3f, 0x62, 0x55, 0x88, 0xc5, 0x16, 0xcc, 0xf8, 0xda,
+	0x7f, 0x7a, 0xa0, 0xc8, 0xcb, 0xe5, 0x07, 0xfb, 0x1d, 0xa2, 0xec, 0xf4, 0xd1, 0xfc, 0x4e, 0x53,
+	0x64, 0x70, 0x37, 0x65, 0xfb, 0x47, 0x2d, 0x78, 0x4c, 0x47, 0xd4, 0x5c, 0x5b, 0xfa, 0x69, 0xf7,
+	0x2b, 0x50, 0x0a, 0xda, 0x24, 0x74, 0xe2, 0x20, 0x14, 0xa7, 0xc6, 0x15, 0x39, 0xe8, 0xb7, 0x44,
+	0xf9, 0xa1, 0x88, 0x64, 0x2e, 0xa9, 0xcb, 0x72, 0xac, 0x6a, 0xd2, 0xdb, 0x27, 0x1b, 0x8c, 0x48,
+	0x38, 0x31, 0x31, 0x1e, 0xc0, 0x1e, 0xba, 0x23, 0x2c, 0x20, 0xf6, 0x57, 0x2d, 0xbe, 0xb0, 0xf4,
+	0xae, 0xa3, 0xb7, 0x61, 0xba, 0xe5, 0xc4, 0x8d, 0x9d, 0x95, 0xfb, 0xed, 0x90, 0xbf, 0x95, 0xc8,
+	0x71, 0x7a, 0xa6, 0xdf, 0x38, 0x69, 0x1f, 0x99, 0xd8, 0x52, 0xae, 0xa5, 0x88, 0xe1, 0x2e, 0xf2,
+	0x68, 0x13, 0xc6, 0x58, 0x19, 0xf3, 0xcf, 0x8b, 0x7a, 0x89, 0x06, 0x79, 0xad, 0x29, 0x5b, 0x81,
+	0xb5, 0x84, 0x0e, 0xd6, 0x89, 0xda, 0x3f, 0x55, 0xe4, 0xbb, 0x9d, 0x89, 0xf2, 0x4f, 0xc3, 0x68,
+	0x3b, 0x70, 0x97, 0xab, 0x15, 0x2c, 0x66, 0x41, 0x1d, 0x23, 0x35, 0x5e, 0x8c, 0x25, 0x1c, 0x5d,
+	0x81, 0x92, 0xf8, 0x29, 0xdf, 0xb6, 0x18, 0x6f, 0x16, 0x78, 0x11, 0x56, 0x50, 0xf4, 0x02, 0x40,
+	0x3b, 0x0c, 0xf6, 0x3c, 0x97, 0xc5, 0x8d, 0x28, 0x9a, 0x66, 0x3e, 0x35, 0x05, 0xc1, 0x1a, 0x16,
+	0x7a, 0x0d, 0x26, 0x3a, 0x7e, 0xc4, 0xc5, 0x11, 0x2d, 0x4a, 0xac, 0x32, 0x40, 0xb9, 0xad, 0x03,
+	0xb1, 0x89, 0x8b, 0x16, 0x61, 0x24, 0x76, 0x98, 0xd9, 0xca, 0x70, 0xbe, 0xbd, 0xed, 0x06, 0xc5,
+	0xd0, 0xd3, 0x7e, 0xd0, 0x0a, 0x58, 0x54, 0x44, 0x9f, 0x96, 0xae, 0xb2, 0x9c, 0xb1, 0x0b, 0x43,
+	0xf7, 0xc1, 0x0e, 0x01, 0xcd, 0x51, 0x56, 0x18, 0xd0, 0x1b, 0xb4, 0xd0, 0xc7, 0x00, 0xc8, 0xfd,
+	0x98, 0x84, 0xbe, 0xd3, 0x54, 0x56, 0x61, 0x4a, 0x2e, 0xa8, 0x04, 0xeb, 0x41, 0x7c, 0x3b, 0x22,
+	0x2b, 0x0a, 0x03, 0x6b, 0xd8, 0xf6, 0x6f, 0x94, 0x01, 0x12, 0xb9, 0x1d, 0xbd, 0xd3, 0xc5, 0xb8,
+	0x9e, 0xed, 0x2d, 0xe9, 0x1f, 0x1f, 0xd7, 0x42, 0xdf, 0x65, 0xc1, 0x98, 0xd3, 0x6c, 0x06, 0x0d,
+	0x87, 0xc7, 0xf1, 0x2d, 0xf4, 0x66, 0x9c, 0xa2, 0xfd, 0xc5, 0xa4, 0x06, 0xef, 0xc2, 0x8b, 0x72,
+	0x85, 0x6a, 0x90, 0xbe, 0xbd, 0xd0, 0x1b, 0x46, 0x1f, 0x91, 0x57, 0xc5, 0xa2, 0x31, 0x94, 0xea,
+	0xaa, 0x58, 0x66, 0x67, 0x84, 0x7e, 0x4b, 0xbc, 0x6d, 0xdc, 0x12, 0x87, 0xf2, 0x7d, 0x01, 0x0d,
+	0xf1, 0xb5, 0xdf, 0x05, 0x11, 0xd5, 0xf4, 0xb8, 0x00, 0xc3, 0xf9, 0x8e, 0x77, 0xda, 0x3d, 0xa9,
+	0x4f, 0x4c, 0x80, 0xcf, 0xc3, 0x94, 0x6b, 0x0a, 0x01, 0x62, 0x25, 0x3e, 0x95, 0x47, 0x37, 0x25,
+	0x33, 0x24, 0xc7, 0x7e, 0x0a, 0x80, 0xd3, 0x84, 0x51, 0x8d, 0x87, 0x89, 0xa8, 0xfa, 0x5b, 0x81,
+	0x70, 0xb6, 0xb0, 0x73, 0xe7, 0x72, 0x3f, 0x8a, 0x49, 0x8b, 0x62, 0x26, 0xa7, 0xfb, 0xba, 0xa8,
+	0x8b, 0x15, 0x15, 0xf4, 0x06, 0x8c, 0x30, 0xcf, 0xab, 0x68, 0xb6, 0x94, 0xaf, 0x2b, 0x36, 0xe3,
+	0x9e, 0x25, 0x1b, 0x92, 0xfd, 0x8d, 0xb0, 0xa0, 0x80, 0xae, 0x4b, 0xbf, 0xc6, 0xa8, 0xea, 0xdf,
+	0x8e, 0x08, 0xf3, 0x6b, 0x2c, 0x2f, 0x7d, 0x30, 0x71, 0x59, 0xe4, 0xe5, 0x99, 0xc9, 0xc1, 0x8c,
+	0x9a, 0x54, 0x8a, 0x12, 0xff, 0x65, 0xce, 0xb1, 0x59, 0xc8, 0xef, 0x9e, 0x99, 0x97, 0x2c, 0x19,
+	0xce, 0x3b, 0x26, 0x09, 0x9c, 0xa6, 0x49, 0x25, 0x52, 0xbe, 0xeb, 0x85, 0xbb, 0x46, 0x3f, 0xde,
+	0xc1, 0x2f, 0xe2, 0xec, 0x34, 0xe2, 0x25, 0x58, 0xd4, 0x3f, 0x51, 0xf1, 0x60, 0xce, 0x87, 0xe9,
+	0xf4, 0x16, 0x7d, 0xa4, 0xe2, 0xc8, 0xef, 0x0f, 0xc1, 0xa4, 0xb9, 0xa4, 0xd0, 0x55, 0x28, 0x0b,
+	0x22, 0x2a, 0x4f, 0x80, 0xda, 0x25, 0x6b, 0x12, 0x80, 0x13, 0x1c, 0x96, 0x1e, 0x82, 0x55, 0xd7,
+	0xcc, 0x6c, 0x93, 0xf4, 0x10, 0x0a, 0x82, 0x35, 0x2c, 0x7a, 0xb1, 0xda, 0x0c, 0x82, 0x58, 0x1d,
+	0x48, 0x6a, 0xdd, 0x2d, 0xb1, 0x52, 0x2c, 0xa0, 0xf4, 0x20, 0xda, 0x25, 0xa1, 0x4f, 0x9a, 0x66,
+	0x44, 0x61, 0x75, 0x10, 0xdd, 0xd0, 0x81, 0xd8, 0xc4, 0xa5, 0xc7, 0x69, 0x10, 0xb1, 0x85, 0x2c,
+	0xae, 0x6f, 0x89, 0xd9, 0x72, 0x9d, 0xbb, 0x56, 0x4b, 0x38, 0xfa, 0x14, 0x3c, 0xa6, 0xa2, 0x26,
+	0x61, 0xfe, 0x0e, 0x21, 0x5b, 0x1c, 0x31, 0xb4, 0x2d, 0x8f, 0x2d, 0x67, 0xa3, 0xe1, 0xbc, 0xfa,
+	0xe8, 0x75, 0x98, 0x14, 0x22, 0xbe, 0xa4, 0x38, 0x6a, 0x9a, 0xc6, 0xdc, 0x30, 0xa0, 0x38, 0x85,
+	0x2d, 0x63, 0x22, 0x33, 0x29, 0x5b, 0x52, 0x28, 0x75, 0xc7, 0x44, 0xd6, 0xe1, 0xb8, 0xab, 0x06,
+	0x5a, 0x84, 0x29, 0x2e, 0x83, 0x79, 0xfe, 0x36, 0x9f, 0x13, 0xe1, 0x4d, 0xa5, 0xb6, 0xd4, 0x2d,
+	0x13, 0x8c, 0xd3, 0xf8, 0xe8, 0x55, 0x18, 0x77, 0xc2, 0xc6, 0x8e, 0x17, 0x93, 0x46, 0xdc, 0x09,
+	0xb9, 0x9b, 0x95, 0x66, 0x5b, 0xb4, 0xa8, 0xc1, 0xb0, 0x81, 0x69, 0xbf, 0x03, 0xa7, 0x32, 0x62,
+	0x2e, 0xd0, 0x85, 0xe3, 0xb4, 0x3d, 0xf9, 0x4d, 0x29, 0x03, 0xe4, 0xc5, 0x5a, 0x55, 0x7e, 0x8d,
+	0x86, 0x45, 0x57, 0x27, 0x8b, 0xcd, 0xa0, 0xa5, 0x18, 0x54, 0xab, 0x73, 0x55, 0x02, 0x70, 0x82,
+	0x63, 0xff, 0xcf, 0x02, 0x4c, 0x65, 0xbc, 0xad, 0xb0, 0x34, 0x77, 0xa9, 0x4b, 0x4a, 0x92, 0xd5,
+	0xce, 0x0c, 0xb1, 0x5d, 0x38, 0x42, 0x88, 0xed, 0x62, 0xbf, 0x10, 0xdb, 0x43, 0xef, 0x26, 0xc4,
+	0xb6, 0x39, 0x62, 0xc3, 0x03, 0x8d, 0x58, 0x46, 0x58, 0xee, 0x91, 0x23, 0x86, 0xe5, 0x36, 0x06,
+	0x7d, 0x74, 0x80, 0x41, 0xff, 0x81, 0x02, 0x4c, 0xa7, 0x6d, 0x20, 0x4f, 0x40, 0x6f, 0xfb, 0x86,
+	0xa1, 0xb7, 0xcd, 0x4e, 0x1a, 0x99, 0xb6, 0xcc, 0xcc, 0xd3, 0xe1, 0xe2, 0x94, 0x0e, 0xf7, 0xc3,
+	0x03, 0x51, 0xeb, 0xad, 0xcf, 0xfd, 0xbb, 0x05, 0x38, 0x93, 0xae, 0xb2, 0xdc, 0x74, 0xbc, 0xd6,
+	0x09, 0x8c, 0xcd, 0x2d, 0x63, 0x6c, 0x9e, 0x1b, 0xe4, 0x6b, 0x58, 0xd7, 0x72, 0x07, 0xe8, 0x6e,
+	0x6a, 0x80, 0xae, 0x0e, 0x4e, 0xb2, 0xf7, 0x28, 0x7d, 0xa5, 0x08, 0x17, 0x33, 0xeb, 0x25, 0x6a,
+	0xcf, 0x55, 0x43, 0xed, 0xf9, 0x42, 0x4a, 0xed, 0x69, 0xf7, 0xae, 0x7d, 0x3c, 0x7a, 0x50, 0xe1,
+	0x21, 0xcb, 0x02, 0x08, 0x3c, 0xa4, 0x0e, 0xd4, 0xf0, 0x90, 0x55, 0x84, 0xb0, 0x49, 0xf7, 0xeb,
+	0x49, 0xf7, 0xf9, 0x6f, 0x2d, 0x38, 0x97, 0x39, 0x37, 0x27, 0xa0, 0xeb, 0x5a, 0x37, 0x75, 0x5d,
+	0x4f, 0x0f, 0xbc, 0x5a, 0x73, 0x94, 0x5f, 0xbf, 0x36, 0x94, 0xf3, 0x2d, 0xec, 0x26, 0x7f, 0x0b,
+	0xc6, 0x9c, 0x46, 0x83, 0x44, 0xd1, 0x5a, 0xe0, 0xaa, 0x28, 0xc2, 0xcf, 0xb1, 0x7b, 0x56, 0x52,
+	0x7c, 0x78, 0x30, 0x3f, 0x97, 0x26, 0x91, 0x80, 0xb1, 0x4e, 0x01, 0x7d, 0x06, 0x4a, 0x91, 0x38,
+	0x37, 0xc5, 0xdc, 0xbf, 0x38, 0xe0, 0xe0, 0x38, 0x9b, 0xa4, 0x69, 0x86, 0x39, 0x52, 0x9a, 0x0a,
+	0x45, 0xd2, 0x0c, 0x89, 0x52, 0x38, 0xd6, 0x90, 0x28, 0x2f, 0x00, 0xec, 0xa9, 0xcb, 0x40, 0x5a,
+	0xff, 0xa0, 0x5d, 0x13, 0x34, 0x2c, 0xf4, 0x4d, 0x30, 0x1d, 0xf1, 0x38, 0x80, 0xcb, 0x4d, 0x27,
+	0x62, 0x6e, 0x2e, 0x62, 0x15, 0xb2, 0x50, 0x4a, 0xf5, 0x14, 0x0c, 0x77, 0x61, 0xa3, 0x55, 0xd9,
+	0x2a, 0x0b, 0x5a, 0xc8, 0x17, 0xe6, 0xe5, 0xa4, 0x45, 0x91, 0x64, 0xf7, 0x74, 0x7a, 0xf8, 0xd9,
+	0xc0, 0x6b, 0x35, 0xd1, 0x67, 0x00, 0xe8, 0xf2, 0x11, 0x7a, 0x88, 0xd1, 0x7c, 0xe6, 0x49, 0xb9,
+	0x8a, 0x9b, 0x69, 0x95, 0xcb, 0x7c, 0x53, 0x2b, 0x8a, 0x08, 0xd6, 0x08, 0xda, 0x3f, 0x30, 0x04,
+	0x8f, 0xf7, 0xe0, 0x91, 0x68, 0xd1, 0x7c, 0x87, 0x7d, 0x26, 0x7d, 0xb9, 0x9e, 0xcb, 0xac, 0x6c,
+	0xdc, 0xb6, 0x53, 0x4b, 0xb1, 0xf0, 0xae, 0x97, 0xe2, 0xf7, 0x5a, 0x9a, 0xda, 0x83, 0xdb, 0x6a,
+	0x7e, 0xe2, 0x88, 0xbc, 0xff, 0x18, 0xf5, 0x20, 0x5b, 0x19, 0xca, 0x84, 0x17, 0x06, 0xee, 0xce,
+	0xc0, 0xda, 0x85, 0x93, 0xd5, 0x12, 0x7f, 0xc1, 0x82, 0x27, 0x32, 0xfb, 0x6b, 0x58, 0xe4, 0x5c,
+	0x85, 0x72, 0x83, 0x16, 0x6a, 0xae, 0x88, 0x89, 0x8f, 0xb6, 0x04, 0xe0, 0x04, 0xc7, 0x30, 0xbc,
+	0x29, 0xf4, 0x35, 0xbc, 0xf9, 0x65, 0x0b, 0xba, 0xf6, 0xc7, 0x09, 0x30, 0xea, 0xaa, 0xc9, 0xa8,
+	0x3f, 0x38, 0xc8, 0x5c, 0xe6, 0xf0, 0xe8, 0x3f, 0x9a, 0x82, 0xb3, 0x39, 0xae, 0x38, 0x7b, 0x30,
+	0xb3, 0xdd, 0x20, 0xa6, 0x93, 0xa7, 0xf8, 0x98, 0x4c, 0x7f, 0xd8, 0x9e, 0x1e, 0xa1, 0x2c, 0x63,
+	0xe6, 0x4c, 0x17, 0x0a, 0xee, 0x6e, 0x02, 0x7d, 0xc1, 0x82, 0xd3, 0xce, 0xbd, 0xa8, 0x2b, 0xc5,
+	0xbe, 0x58, 0x33, 0x2f, 0x65, 0x2a, 0x41, 0xfa, 0xa4, 0xe4, 0xe7, 0x29, 0x44, 0xb3, 0xb0, 0x70,
+	0x66, 0x5b, 0x08, 0x8b, 0xb8, 0xf2, 0x54, 0x9c, 0xef, 0xe1, 0x86, 0x9c, 0xe5, 0x33, 0xc5, 0x4f,
+	0x10, 0x09, 0xc1, 0x8a, 0x0e, 0xfa, 0x1c, 0x94, 0xb7, 0xa5, 0x23, 0x63, 0xc6, 0x09, 0x95, 0x0c,
+	0x64, 0x6f, 0xf7, 0x4e, 0xfe, 0x92, 0xa9, 0x90, 0x70, 0x42, 0x14, 0xbd, 0x0e, 0x45, 0x7f, 0x2b,
+	0xea, 0x95, 0x85, 0x33, 0x65, 0xb2, 0xc6, 0x9d, 0xfd, 0xd7, 0x57, 0xeb, 0x98, 0x56, 0x44, 0xd7,
+	0xa1, 0x18, 0x6e, 0xba, 0x42, 0x83, 0x97, 0xc9, 0xc3, 0xf1, 0x52, 0x25, 0xa7, 0x57, 0x8c, 0x12,
+	0x5e, 0xaa, 0x60, 0x4a, 0x02, 0xd5, 0x60, 0x98, 0xf9, 0xaf, 0x88, 0xf3, 0x20, 0x53, 0xf2, 0xed,
+	0xe1, 0x07, 0xc6, 0x23, 0x02, 0x30, 0x04, 0xcc, 0x09, 0xa1, 0x0d, 0x18, 0x69, 0xb0, 0x8c, 0x8d,
+	0x22, 0x1e, 0xd9, 0x47, 0x32, 0x75, 0x75, 0x3d, 0x52, 0x59, 0x0a, 0xd5, 0x15, 0xc3, 0xc0, 0x82,
+	0x16, 0xa3, 0x4a, 0xda, 0x3b, 0x5b, 0x91, 0xc8, 0x30, 0x9c, 0x4d, 0xb5, 0x47, 0x86, 0x56, 0x41,
+	0x95, 0x61, 0x60, 0x41, 0x0b, 0x7d, 0x0c, 0x0a, 0x5b, 0x0d, 0xe1, 0x9b, 0x92, 0xa9, 0xb4, 0x33,
+	0xe3, 0x35, 0x2c, 0x8d, 0x3c, 0x38, 0x98, 0x2f, 0xac, 0x2e, 0xe3, 0xc2, 0x56, 0x03, 0xad, 0xc3,
+	0xe8, 0x16, 0xf7, 0xf0, 0x16, 0x7a, 0xb9, 0xa7, 0xb2, 0x9d, 0xcf, 0xbb, 0x9c, 0xc0, 0xb9, 0x5b,
+	0x86, 0x00, 0x60, 0x49, 0x84, 0x85, 0x69, 0x57, 0x9e, 0xea, 0x22, 0x74, 0xd7, 0xc2, 0xd1, 0xa2,
+	0x0b, 0xf0, 0xf3, 0x39, 0xf1, 0x77, 0xc7, 0x1a, 0x45, 0xba, 0xaa, 0x1d, 0x99, 0xe6, 0x5d, 0x84,
+	0x62, 0xc9, 0x5c, 0xd5, 0x7d, 0x32, 0xe0, 0xf3, 0x55, 0xad, 0x90, 0x70, 0x42, 0x14, 0xed, 0xc2,
+	0xc4, 0x5e, 0xd4, 0xde, 0x21, 0x72, 0x4b, 0xb3, 0xc8, 0x2c, 0x39, 0x47, 0xd8, 0x1d, 0x81, 0xe8,
+	0x85, 0x71, 0xc7, 0x69, 0x76, 0x71, 0x21, 0xf6, 0xfc, 0x7d, 0x47, 0x27, 0x86, 0x4d, 0xda, 0x74,
+	0xf8, 0xdf, 0xee, 0x04, 0x9b, 0xfb, 0x31, 0x11, 0x11, 0xb7, 0x32, 0x87, 0xff, 0x4d, 0x8e, 0xd2,
+	0x3d, 0xfc, 0x02, 0x80, 0x25, 0x11, 0x74, 0x47, 0x0c, 0x0f, 0xe3, 0x9e, 0xd3, 0xf9, 0x61, 0x31,
+	0x17, 0x25, 0x52, 0xce, 0xa0, 0x30, 0x6e, 0x99, 0x90, 0x62, 0x5c, 0xb2, 0xbd, 0x13, 0xc4, 0x81,
+	0x9f, 0xe2, 0xd0, 0x33, 0xf9, 0x5c, 0xb2, 0x96, 0x81, 0xdf, 0xcd, 0x25, 0xb3, 0xb0, 0x70, 0x66,
+	0x5b, 0xc8, 0x85, 0xc9, 0x76, 0x10, 0xc6, 0xf7, 0x82, 0x50, 0xae, 0x2f, 0xd4, 0x43, 0xaf, 0x60,
+	0x60, 0x8a, 0x16, 0x59, 0x30, 0x3b, 0x13, 0x82, 0x53, 0x34, 0xd1, 0x27, 0x61, 0x34, 0x6a, 0x38,
+	0x4d, 0x52, 0xbd, 0x35, 0x7b, 0x2a, 0xff, 0xf8, 0xa9, 0x73, 0x94, 0x9c, 0xd5, 0xc5, 0x63, 0xba,
+	0x73, 0x14, 0x2c, 0xc9, 0xa1, 0x55, 0x18, 0x66, 0x69, 0xb8, 0x58, 0x78, 0xb8, 0x9c, 0xe8, 0x9e,
+	0x5d, 0x06, 0xc4, 0x9c, 0x37, 0xb1, 0x62, 0xcc, 0xab, 0xd3, 0x3d, 0x20, 0xc4, 0xeb, 0x20, 0x9a,
+	0x3d, 0x93, 0xbf, 0x07, 0x84, 0x54, 0x7e, 0xab, 0xde, 0x6b, 0x0f, 0x28, 0x24, 0x9c, 0x10, 0xa5,
+	0x9c, 0x99, 0x72, 0xd3, 0xb3, 0x3d, 0x2c, 0x5f, 0x72, 0x79, 0x29, 0xe3, 0xcc, 0x94, 0x93, 0x52,
+	0x12, 0xf6, 0xef, 0x8e, 0x76, 0xcb, 0x2c, 0xec, 0x42, 0xf6, 0x1d, 0x56, 0xd7, 0x5b, 0xdd, 0x47,
+	0x07, 0xd5, 0x0f, 0x1d, 0xa3, 0xb4, 0xfa, 0x05, 0x0b, 0xce, 0xb6, 0x33, 0x3f, 0x44, 0x08, 0x00,
+	0x83, 0xa9, 0x99, 0xf8, 0xa7, 0xab, 0x50, 0x82, 0xd9, 0x70, 0x9c, 0xd3, 0x52, 0xfa, 0x46, 0x50,
+	0x7c, 0xd7, 0x37, 0x82, 0x35, 0x28, 0x31, 0x21, 0xb3, 0x4f, 0x06, 0xe3, 0xf4, 0xc5, 0x88, 0x89,
+	0x12, 0xcb, 0xa2, 0x22, 0x56, 0x24, 0xd0, 0xf7, 0x59, 0x70, 0x21, 0xdd, 0x75, 0x4c, 0x18, 0x58,
+	0xc4, 0x1f, 0xe4, 0x77, 0xc1, 0x55, 0xf1, 0xfd, 0x17, 0x6a, 0xbd, 0x90, 0x0f, 0xfb, 0x21, 0xe0,
+	0xde, 0x8d, 0xa1, 0x4a, 0xc6, 0x65, 0x74, 0xc4, 0x54, 0xc0, 0x0f, 0x70, 0x21, 0x7d, 0x09, 0xc6,
+	0x5b, 0x41, 0xc7, 0x8f, 0x85, 0xa1, 0x8c, 0x78, 0xb4, 0x67, 0x8f, 0xd5, 0x6b, 0x5a, 0x39, 0x36,
+	0xb0, 0x52, 0xd7, 0xd8, 0xd2, 0x43, 0x5f, 0x63, 0xdf, 0x82, 0x71, 0x5f, 0xb3, 0xec, 0x14, 0xf2,
+	0xc0, 0xe5, 0xfc, 0xd8, 0xa1, 0xba, 0x1d, 0x28, 0xef, 0xa5, 0x5e, 0x82, 0x0d, 0x6a, 0x27, 0x7b,
+	0x37, 0xfa, 0x49, 0x2b, 0x43, 0xa8, 0xe7, 0xb7, 0xe5, 0x8f, 0x9b, 0xb7, 0xe5, 0xcb, 0xe9, 0xdb,
+	0x72, 0x97, 0xf2, 0xd5, 0xb8, 0x28, 0x0f, 0x9e, 0x1a, 0x65, 0xd0, 0x30, 0x81, 0x76, 0x13, 0x2e,
+	0xf5, 0x3b, 0x96, 0x98, 0xc5, 0x94, 0xab, 0x9e, 0xda, 0x12, 0x8b, 0x29, 0xb7, 0x5a, 0xc1, 0x0c,
+	0x32, 0x68, 0x1c, 0x19, 0xfb, 0x7f, 0x58, 0x50, 0xac, 0x05, 0xee, 0x09, 0x28, 0x93, 0x3f, 0x61,
+	0x28, 0x93, 0x1f, 0xcf, 0x3e, 0x10, 0xdd, 0x5c, 0xd5, 0xf1, 0x4a, 0x4a, 0x75, 0x7c, 0x21, 0x8f,
+	0x40, 0x6f, 0x45, 0xf1, 0x8f, 0x15, 0x61, 0xac, 0x16, 0xb8, 0xca, 0x5c, 0xf9, 0xd7, 0x1e, 0xc6,
+	0x5c, 0x39, 0x37, 0xc0, 0xbf, 0x46, 0x99, 0x19, 0x5a, 0x49, 0x1f, 0xcb, 0xbf, 0x60, 0x56, 0xcb,
+	0x77, 0x89, 0xb7, 0xbd, 0x13, 0x13, 0x37, 0xfd, 0x39, 0x27, 0x67, 0xb5, 0xfc, 0xdf, 0x2d, 0x98,
+	0x4a, 0xb5, 0x8e, 0x9a, 0x30, 0xd1, 0xd4, 0x15, 0x93, 0x62, 0x9d, 0x3e, 0x94, 0x4e, 0x53, 0x58,
+	0x7d, 0x6a, 0x45, 0xd8, 0x24, 0x8e, 0x16, 0x00, 0xd4, 0x4b, 0x9d, 0xd4, 0x80, 0x31, 0xa9, 0x5f,
+	0x3d, 0xe5, 0x45, 0x58, 0xc3, 0x40, 0x2f, 0xc3, 0x58, 0x1c, 0xb4, 0x83, 0x66, 0xb0, 0xbd, 0x7f,
+	0x83, 0xc8, 0xc8, 0x45, 0xca, 0x96, 0x6b, 0x23, 0x01, 0x61, 0x1d, 0xcf, 0xfe, 0x89, 0x22, 0xff,
+	0x50, 0x3f, 0xf6, 0xde, 0x5f, 0x93, 0xef, 0xed, 0x35, 0xf9, 0x15, 0x0b, 0xa6, 0x69, 0xeb, 0xcc,
+	0x5c, 0x44, 0x1e, 0xb6, 0x2a, 0x66, 0xb0, 0xd5, 0x23, 0x66, 0xf0, 0x65, 0xca, 0xbb, 0xdc, 0xa0,
+	0x13, 0x0b, 0x0d, 0x9a, 0xc6, 0x9c, 0x68, 0x29, 0x16, 0x50, 0x81, 0x47, 0xc2, 0x50, 0xb8, 0xb8,
+	0xe9, 0x78, 0x24, 0x0c, 0xb1, 0x80, 0xca, 0x90, 0xc2, 0x43, 0xd9, 0x21, 0x85, 0x79, 0x1c, 0x46,
+	0x61, 0x58, 0x20, 0xc4, 0x1e, 0x2d, 0x0e, 0xa3, 0xb4, 0x38, 0x48, 0x70, 0xec, 0x9f, 0x29, 0xc2,
+	0x78, 0x2d, 0x70, 0x93, 0xb7, 0xb2, 0x97, 0x8c, 0xb7, 0xb2, 0x4b, 0xa9, 0xb7, 0xb2, 0x69, 0x1d,
+	0xf7, 0xfd, 0x97, 0xb1, 0xaf, 0xd5, 0xcb, 0xd8, 0xbf, 0xb0, 0xd8, 0xac, 0x55, 0xd6, 0xeb, 0xdc,
+	0xfa, 0x08, 0x3d, 0x0f, 0x63, 0x8c, 0x21, 0x31, 0x9f, 0x4a, 0xf9, 0x80, 0xc4, 0x52, 0xe5, 0xac,
+	0x27, 0xc5, 0x58, 0xc7, 0x41, 0x57, 0xa0, 0x14, 0x11, 0x27, 0x6c, 0xec, 0x28, 0x1e, 0x27, 0x5e,
+	0x7b, 0x78, 0x19, 0x56, 0x50, 0xf4, 0x66, 0x12, 0x02, 0xb0, 0x98, 0xef, 0xa3, 0xa5, 0xf7, 0x87,
+	0x6f, 0x91, 0xfc, 0xb8, 0x7f, 0xf6, 0x5d, 0x40, 0xdd, 0xf8, 0x03, 0xc4, 0xbe, 0x9a, 0x37, 0x63,
+	0x5f, 0x95, 0xbb, 0xe2, 0x5e, 0xfd, 0x99, 0x05, 0x93, 0xb5, 0xc0, 0xa5, 0x5b, 0xf7, 0xeb, 0x69,
+	0x9f, 0xea, 0xf1, 0x4f, 0x47, 0x7a, 0xc4, 0x3f, 0x7d, 0x12, 0x86, 0x6b, 0x81, 0x5b, 0xad, 0xf5,
+	0xf2, 0x6d, 0xb6, 0xff, 0x9e, 0x05, 0xa3, 0xb5, 0xc0, 0x3d, 0x01, 0xe5, 0xfc, 0xc7, 0x4d, 0xe5,
+	0xfc, 0x63, 0x39, 0xeb, 0x26, 0x47, 0x1f, 0xff, 0x77, 0x86, 0x60, 0x82, 0xf6, 0x33, 0xd8, 0x96,
+	0x53, 0x69, 0x0c, 0x9b, 0x35, 0xc0, 0xb0, 0x51, 0x59, 0x38, 0x68, 0x36, 0x83, 0x7b, 0xe9, 0x69,
+	0x5d, 0x65, 0xa5, 0x58, 0x40, 0xd1, 0xb3, 0x50, 0x6a, 0x87, 0x64, 0xcf, 0x0b, 0x84, 0x90, 0xa9,
+	0x3d, 0x75, 0xd4, 0x44, 0x39, 0x56, 0x18, 0xf4, 0x72, 0x16, 0x79, 0x7e, 0x83, 0xd4, 0x49, 0x23,
+	0xf0, 0x5d, 0xae, 0xbf, 0x2e, 0x8a, 0xb4, 0x01, 0x5a, 0x39, 0x36, 0xb0, 0xd0, 0x5d, 0x28, 0xb3,
+	0xff, 0x8c, 0xed, 0x1c, 0x3d, 0x01, 0xa5, 0x48, 0x48, 0x26, 0x08, 0xe0, 0x84, 0x16, 0x7a, 0x01,
+	0x20, 0x96, 0x11, 0xb2, 0x23, 0x11, 0xe7, 0x48, 0x09, 0xe4, 0x2a, 0x76, 0x76, 0x84, 0x35, 0x2c,
+	0xf4, 0x0c, 0x94, 0x63, 0xc7, 0x6b, 0xde, 0xf4, 0x7c, 0x12, 0x31, 0xbd, 0x74, 0x51, 0xe6, 0x05,
+	0x13, 0x85, 0x38, 0x81, 0x53, 0x81, 0x88, 0x05, 0x01, 0xe0, 0xe9, 0x6b, 0x4b, 0x0c, 0x9b, 0x09,
+	0x44, 0x37, 0x55, 0x29, 0xd6, 0x30, 0xd0, 0x0e, 0x9c, 0xf7, 0x7c, 0x16, 0x62, 0x9f, 0xd4, 0x77,
+	0xbd, 0xf6, 0xc6, 0xcd, 0xfa, 0x1d, 0x12, 0x7a, 0x5b, 0xfb, 0x4b, 0x4e, 0x63, 0x97, 0xf8, 0x32,
+	0xb5, 0xe0, 0x07, 0x45, 0x17, 0xcf, 0x57, 0x7b, 0xe0, 0xe2, 0x9e, 0x94, 0xec, 0x57, 0xe1, 0x4c,
+	0x2d, 0x70, 0x6b, 0x41, 0x18, 0xaf, 0x06, 0xe1, 0x3d, 0x27, 0x74, 0xe5, 0x4a, 0x99, 0x97, 0x59,
+	0x48, 0x28, 0x2b, 0x1c, 0xe6, 0x8c, 0xc2, 0xc8, 0x85, 0xf5, 0x22, 0x13, 0xbe, 0x8e, 0xe8, 0x8c,
+	0xd2, 0x60, 0x62, 0x80, 0xca, 0x37, 0x71, 0xcd, 0x89, 0x09, 0xba, 0xc5, 0xf2, 0xe8, 0x26, 0x27,
+	0xa2, 0xa8, 0xfe, 0xb4, 0x96, 0x47, 0x37, 0x01, 0x66, 0x1e, 0xa1, 0x66, 0x7d, 0xfb, 0xaf, 0x0d,
+	0x33, 0xe6, 0x98, 0xca, 0x59, 0x80, 0x3e, 0x0b, 0x93, 0x11, 0xb9, 0xe9, 0xf9, 0x9d, 0xfb, 0x52,
+	0x27, 0xd0, 0xc3, 0x9d, 0xa8, 0xbe, 0xa2, 0x63, 0x72, 0xcd, 0xa2, 0x59, 0x86, 0x53, 0xd4, 0x50,
+	0x0b, 0x26, 0xef, 0x79, 0xbe, 0x1b, 0xdc, 0x8b, 0x24, 0xfd, 0x52, 0xbe, 0x82, 0xf1, 0x2e, 0xc7,
+	0x4c, 0xf5, 0xd1, 0x68, 0xee, 0xae, 0x41, 0x0c, 0xa7, 0x88, 0xd3, 0x05, 0x18, 0x76, 0xfc, 0xc5,
+	0xe8, 0x76, 0x44, 0x42, 0x91, 0x11, 0x99, 0x2d, 0x40, 0x2c, 0x0b, 0x71, 0x02, 0xa7, 0x0b, 0x90,
+	0xfd, 0xb9, 0x16, 0x06, 0x1d, 0x1e, 0xc7, 0x5e, 0x2c, 0x40, 0xac, 0x4a, 0xb1, 0x86, 0x41, 0x37,
+	0x28, 0xfb, 0xb7, 0x1e, 0xf8, 0x38, 0x08, 0x62, 0xb9, 0xa5, 0x59, 0x0e, 0x4e, 0xad, 0x1c, 0x1b,
+	0x58, 0x68, 0x15, 0x50, 0xd4, 0x69, 0xb7, 0x9b, 0xcc, 0x4e, 0xc1, 0x69, 0x32, 0x52, 0xfc, 0x8d,
+	0xb8, 0xc8, 0xa3, 0x74, 0xd6, 0xbb, 0xa0, 0x38, 0xa3, 0x06, 0xe5, 0xd5, 0x5b, 0xa2, 0xab, 0xc3,
+	0xac, 0xab, 0xfc, 0x31, 0xa2, 0xce, 0xfb, 0x29, 0x61, 0x68, 0x05, 0x46, 0xa3, 0xfd, 0xa8, 0x11,
+	0x8b, 0x70, 0x63, 0x39, 0x69, 0x69, 0xea, 0x0c, 0x45, 0xcb, 0x8a, 0xc6, 0xab, 0x60, 0x59, 0x17,
+	0x35, 0xe0, 0x94, 0xa0, 0xb8, 0xbc, 0xe3, 0xf8, 0x2a, 0xc9, 0x07, 0x37, 0xd7, 0x7c, 0xfe, 0xc1,
+	0xc1, 0xfc, 0x29, 0xd1, 0xb2, 0x0e, 0x3e, 0x3c, 0x98, 0x3f, 0x5b, 0x0b, 0xdc, 0x0c, 0x08, 0xce,
+	0xa2, 0x66, 0x7f, 0x2b, 0x93, 0x37, 0x58, 0x92, 0xde, 0xb8, 0x13, 0x12, 0xd4, 0x82, 0x89, 0x36,
+	0x5b, 0xc6, 0x22, 0xfa, 0xbb, 0x58, 0x8b, 0x2f, 0x0d, 0xa8, 0x38, 0xb8, 0x47, 0xd9, 0xb4, 0x52,
+	0xec, 0xb1, 0x1b, 0x59, 0x4d, 0x27, 0x87, 0x4d, 0xea, 0xf6, 0x57, 0xce, 0xb2, 0x13, 0xab, 0xce,
+	0xb5, 0x01, 0xa3, 0xc2, 0x7a, 0x5b, 0x5c, 0x7d, 0xe6, 0xf2, 0xd5, 0x52, 0xc9, 0xb0, 0x09, 0x0b,
+	0x70, 0x2c, 0xeb, 0xa2, 0xcf, 0xc0, 0x24, 0xbd, 0x49, 0x68, 0xd9, 0x2f, 0x4e, 0xe7, 0x7b, 0xd9,
+	0x27, 0x49, 0x2f, 0xb4, 0xcc, 0x10, 0x7a, 0x65, 0x9c, 0x22, 0x86, 0xde, 0x64, 0x76, 0x06, 0x66,
+	0x62, 0x8d, 0x3e, 0xa4, 0x75, 0x93, 0x02, 0x49, 0x56, 0x23, 0x92, 0x97, 0xb4, 0xc3, 0x7e, 0xb4,
+	0x49, 0x3b, 0xd0, 0x4d, 0x98, 0x10, 0x99, 0x6a, 0xc5, 0xca, 0x2a, 0x1a, 0xda, 0xb2, 0x09, 0xac,
+	0x03, 0x0f, 0xd3, 0x05, 0xd8, 0xac, 0x8c, 0xb6, 0xe1, 0x82, 0x96, 0x39, 0xe6, 0x5a, 0xe8, 0xb0,
+	0x27, 0x6f, 0x8f, 0xb1, 0x3b, 0xed, 0x2c, 0x7d, 0xe2, 0xc1, 0xc1, 0xfc, 0x85, 0x8d, 0x5e, 0x88,
+	0xb8, 0x37, 0x1d, 0x74, 0x0b, 0xce, 0x70, 0x1f, 0xd1, 0x0a, 0x71, 0xdc, 0xa6, 0xe7, 0xab, 0xc3,
+	0x9a, 0x6f, 0xc9, 0x73, 0x0f, 0x0e, 0xe6, 0xcf, 0x2c, 0x66, 0x21, 0xe0, 0xec, 0x7a, 0xe8, 0xe3,
+	0x50, 0x76, 0xfd, 0x48, 0x8c, 0xc1, 0x88, 0x91, 0x9c, 0xa7, 0x5c, 0x59, 0xaf, 0xab, 0xef, 0x4f,
+	0xfe, 0xe0, 0xa4, 0x02, 0xda, 0xe6, 0x1a, 0x55, 0xa5, 0xc0, 0x18, 0xed, 0x8a, 0x6e, 0x93, 0x56,
+	0x85, 0x19, 0x5e, 0x62, 0xfc, 0x29, 0x41, 0x19, 0x4f, 0x1b, 0x0e, 0x64, 0x06, 0x61, 0xf4, 0x06,
+	0x20, 0x2a, 0xe1, 0x7b, 0x0d, 0xb2, 0xd8, 0x60, 0xa9, 0x05, 0x98, 0x02, 0xba, 0x64, 0xfa, 0x2d,
+	0xd5, 0xbb, 0x30, 0x70, 0x46, 0x2d, 0x74, 0x9d, 0x1e, 0x39, 0x7a, 0xa9, 0xe0, 0x2a, 0x2a, 0x95,
+	0x5a, 0x85, 0xb4, 0x43, 0xd2, 0x70, 0x62, 0xe2, 0x9a, 0x14, 0x71, 0xaa, 0x1e, 0x72, 0xe1, 0xbc,
+	0xd3, 0x89, 0x03, 0xa6, 0xac, 0x36, 0x51, 0x37, 0x82, 0x5d, 0xe2, 0xb3, 0x77, 0xa2, 0xd2, 0xd2,
+	0x25, 0x2a, 0x0d, 0x2c, 0xf6, 0xc0, 0xc3, 0x3d, 0xa9, 0x50, 0x29, 0x4e, 0xe5, 0x4e, 0x05, 0x33,
+	0x68, 0x4f, 0x46, 0xfe, 0xd4, 0x97, 0x61, 0x6c, 0x27, 0x88, 0xe2, 0x75, 0x12, 0xdf, 0x0b, 0xc2,
+	0x5d, 0x11, 0x7a, 0x31, 0x09, 0xd7, 0x9b, 0x80, 0xb0, 0x8e, 0x47, 0xaf, 0x69, 0xcc, 0x8a, 0xa1,
+	0x5a, 0x61, 0x0f, 0xc8, 0xa5, 0x84, 0xc7, 0x5c, 0xe7, 0xc5, 0x58, 0xc2, 0x25, 0x6a, 0xb5, 0xb6,
+	0xcc, 0x1e, 0x83, 0x53, 0xa8, 0xd5, 0xda, 0x32, 0x96, 0x70, 0xba, 0x5c, 0xa3, 0x1d, 0x27, 0x24,
+	0xb5, 0x30, 0x68, 0x90, 0x48, 0x0b, 0x12, 0xfd, 0x38, 0x0f, 0x2c, 0x49, 0x97, 0x6b, 0x3d, 0x0b,
+	0x01, 0x67, 0xd7, 0x43, 0xa4, 0x3b, 0x6b, 0xd2, 0x64, 0xbe, 0x16, 0xbf, 0x5b, 0xde, 0x18, 0x30,
+	0x71, 0x92, 0x0f, 0xd3, 0x2a, 0x5f, 0x13, 0x0f, 0x25, 0x19, 0xcd, 0x4e, 0xb1, 0xb5, 0x3d, 0x78,
+	0x1c, 0x4a, 0xf5, 0x2e, 0x52, 0x4d, 0x51, 0xc2, 0x5d, 0xb4, 0x8d, 0xb8, 0x4c, 0xd3, 0x7d, 0x93,
+	0xe9, 0x5e, 0x85, 0x72, 0xd4, 0xd9, 0x74, 0x83, 0x96, 0xe3, 0xf9, 0xec, 0x31, 0x58, 0xbb, 0x2f,
+	0xd4, 0x25, 0x00, 0x27, 0x38, 0x68, 0x15, 0x4a, 0x8e, 0x7c, 0xf4, 0x40, 0xf9, 0xe1, 0x3c, 0xd4,
+	0x53, 0x07, 0xf7, 0x70, 0x97, 0xcf, 0x1c, 0xaa, 0x2e, 0x7a, 0x0d, 0x26, 0x84, 0x8f, 0xa3, 0x48,
+	0x15, 0x78, 0xca, 0x74, 0x44, 0xa9, 0xeb, 0x40, 0x6c, 0xe2, 0xa2, 0xdb, 0x30, 0x16, 0x07, 0x4d,
+	0xe6, 0x4d, 0x41, 0xc5, 0xb0, 0xb3, 0xf9, 0x21, 0xc1, 0x36, 0x14, 0x9a, 0xae, 0x6f, 0x54, 0x55,
+	0xb1, 0x4e, 0x07, 0x6d, 0xf0, 0xf5, 0xce, 0x82, 0x25, 0x93, 0x68, 0xf6, 0xb1, 0xfc, 0x33, 0x49,
+	0xc5, 0x54, 0x36, 0xb7, 0x83, 0xa8, 0x89, 0x75, 0x32, 0xe8, 0x1a, 0xcc, 0xb4, 0x43, 0x2f, 0x60,
+	0x6b, 0x42, 0xbd, 0x77, 0xcd, 0x9a, 0x29, 0x5e, 0x6a, 0x69, 0x04, 0xdc, 0x5d, 0x87, 0xb9, 0xa8,
+	0x8a, 0xc2, 0xd9, 0x73, 0x3c, 0x9b, 0x30, 0xbf, 0x7e, 0xf1, 0x32, 0xac, 0xa0, 0x68, 0x8d, 0x71,
+	0x62, 0xae, 0x39, 0x98, 0x9d, 0xcb, 0x8f, 0x20, 0xa2, 0x6b, 0x18, 0xb8, 0x70, 0xa9, 0xfe, 0xe2,
+	0x84, 0x02, 0x72, 0xb5, 0xb4, 0x73, 0x54, 0xa2, 0x8f, 0x66, 0xcf, 0xf7, 0x30, 0x25, 0x4b, 0x89,
+	0xff, 0x89, 0x40, 0x60, 0x14, 0x47, 0x38, 0x45, 0x13, 0x7d, 0x13, 0x4c, 0x8b, 0x88, 0x65, 0xc9,
+	0x30, 0x5d, 0x48, 0x6c, 0x54, 0x71, 0x0a, 0x86, 0xbb, 0xb0, 0x79, 0x10, 0x79, 0x67, 0xb3, 0x49,
+	0x04, 0xeb, 0xbb, 0xe9, 0xf9, 0xbb, 0xd1, 0xec, 0x45, 0xc6, 0x1f, 0x44, 0x10, 0xf9, 0x34, 0x14,
+	0x67, 0xd4, 0x40, 0x1b, 0x30, 0xdd, 0x0e, 0x09, 0x69, 0x31, 0x41, 0x5c, 0x9c, 0x67, 0xf3, 0xdc,
+	0x43, 0x9b, 0xf6, 0xa4, 0x96, 0x82, 0x1d, 0x66, 0x94, 0xe1, 0x2e, 0x0a, 0xe8, 0x1e, 0x94, 0x82,
+	0x3d, 0x12, 0xee, 0x10, 0xc7, 0x9d, 0xbd, 0xd4, 0xc3, 0x66, 0x5a, 0x1c, 0x6e, 0xb7, 0x04, 0x6e,
+	0xea, 0x8d, 0x5c, 0x16, 0xf7, 0x7f, 0x23, 0x97, 0x8d, 0xa1, 0xef, 0xb7, 0xe0, 0x9c, 0x54, 0xab,
+	0xd7, 0xdb, 0x74, 0xd4, 0x97, 0x03, 0x3f, 0x8a, 0x43, 0xee, 0x53, 0xfc, 0x44, 0xbe, 0x9f, 0xed,
+	0x46, 0x4e, 0x25, 0xa5, 0xbc, 0x3c, 0x97, 0x87, 0x11, 0xe1, 0xfc, 0x16, 0xe7, 0xbe, 0x11, 0x66,
+	0xba, 0x4e, 0xee, 0xa3, 0xe4, 0xb5, 0x98, 0xdb, 0x85, 0x09, 0x63, 0x74, 0x1e, 0xe9, 0xf3, 0xe8,
+	0xbf, 0x19, 0x85, 0xb2, 0x7a, 0x3a, 0x43, 0x57, 0xcd, 0x17, 0xd1, 0x73, 0xe9, 0x17, 0xd1, 0x12,
+	0xbd, 0x32, 0xeb, 0x8f, 0xa0, 0x1b, 0x19, 0x11, 0x9c, 0xf2, 0xf6, 0xe2, 0xe0, 0xae, 0xb9, 0x9a,
+	0x26, 0xb4, 0x38, 0xf0, 0xd3, 0xea, 0x50, 0x4f, 0xe5, 0xea, 0x35, 0x98, 0xf1, 0x03, 0x26, 0x2e,
+	0x12, 0x57, 0xca, 0x02, 0xec, 0xc8, 0x2f, 0xeb, 0x21, 0x11, 0x52, 0x08, 0xb8, 0xbb, 0x0e, 0x6d,
+	0x90, 0x9f, 0xd9, 0x69, 0x6d, 0x2e, 0x3f, 0xd2, 0xb1, 0x80, 0xa2, 0x27, 0x61, 0xb8, 0x1d, 0xb8,
+	0xd5, 0x9a, 0x10, 0x15, 0xb5, 0x1c, 0xa7, 0x6e, 0xb5, 0x86, 0x39, 0x0c, 0x2d, 0xc2, 0x08, 0xfb,
+	0x11, 0xcd, 0x8e, 0xe7, 0xfb, 0xbe, 0xb3, 0x1a, 0x5a, 0xd6, 0x10, 0x56, 0x01, 0x8b, 0x8a, 0x4c,
+	0xab, 0x44, 0xe5, 0x6b, 0xa6, 0x55, 0x1a, 0x7d, 0x48, 0xad, 0x92, 0x24, 0x80, 0x13, 0x5a, 0xe8,
+	0x3e, 0x9c, 0x31, 0xee, 0x34, 0x7c, 0x89, 0x90, 0x48, 0xf8, 0xdf, 0x3e, 0xd9, 0xf3, 0x32, 0x23,
+	0x9e, 0x62, 0x2f, 0x88, 0x4e, 0x9f, 0xa9, 0x66, 0x51, 0xc2, 0xd9, 0x0d, 0xa0, 0x26, 0xcc, 0x34,
+	0xba, 0x5a, 0x2d, 0x0d, 0xde, 0xaa, 0x9a, 0xd0, 0xee, 0x16, 0xbb, 0x09, 0xa3, 0xd7, 0xa0, 0xf4,
+	0x76, 0x10, 0x31, 0x36, 0x2b, 0xc4, 0x5b, 0xe9, 0xbc, 0x59, 0x7a, 0xf3, 0x56, 0x9d, 0x95, 0x1f,
+	0x1e, 0xcc, 0x8f, 0xd5, 0x02, 0x57, 0xfe, 0xc5, 0xaa, 0x02, 0xfa, 0x6e, 0x0b, 0xe6, 0xba, 0x2f,
+	0x4d, 0xaa, 0xd3, 0x13, 0x83, 0x77, 0xda, 0x16, 0x8d, 0xce, 0xad, 0xe4, 0x92, 0xc3, 0x3d, 0x9a,
+	0xb2, 0x7f, 0x91, 0x3f, 0x9b, 0x8a, 0xc7, 0x15, 0x12, 0x75, 0x9a, 0x27, 0x91, 0x65, 0x71, 0xc5,
+	0x78, 0xf7, 0x79, 0xe8, 0xa7, 0xf9, 0x5f, 0xb5, 0xd8, 0xd3, 0xfc, 0x06, 0x69, 0xb5, 0x9b, 0x4e,
+	0x7c, 0x12, 0xbe, 0x7f, 0x6f, 0x42, 0x29, 0x16, 0xad, 0xf5, 0x4a, 0x0c, 0xa9, 0x75, 0x8a, 0x99,
+	0x27, 0x28, 0x61, 0x53, 0x96, 0x62, 0x45, 0xc6, 0xfe, 0xa7, 0x7c, 0x06, 0x24, 0xe4, 0x04, 0xd4,
+	0xeb, 0x15, 0x53, 0xbd, 0x3e, 0xdf, 0xe7, 0x0b, 0x72, 0xd4, 0xec, 0xff, 0xc4, 0xec, 0x37, 0x53,
+	0xb2, 0xbc, 0xd7, 0x6d, 0x42, 0xec, 0x1f, 0xb4, 0xe0, 0x74, 0x96, 0x11, 0x25, 0xbd, 0x20, 0x70,
+	0x15, 0x8f, 0xb2, 0x91, 0x51, 0x23, 0x78, 0x47, 0x94, 0x63, 0x85, 0x31, 0x70, 0xce, 0xa5, 0xa3,
+	0xc5, 0x20, 0xbd, 0x05, 0x13, 0xb5, 0x90, 0x68, 0x07, 0xda, 0xeb, 0xdc, 0x99, 0x97, 0xf7, 0xe7,
+	0xd9, 0x23, 0x3b, 0xf2, 0xda, 0x3f, 0x55, 0x80, 0xd3, 0xfc, 0x91, 0x7b, 0x71, 0x2f, 0xf0, 0xdc,
+	0x5a, 0xe0, 0x8a, 0x7c, 0x59, 0x9f, 0x86, 0xf1, 0xb6, 0xa6, 0x97, 0xeb, 0x15, 0x4f, 0x4f, 0xd7,
+	0xdf, 0x25, 0x9a, 0x04, 0xbd, 0x14, 0x1b, 0xb4, 0x90, 0x0b, 0xe3, 0x64, 0xcf, 0x6b, 0xa8, 0x97,
+	0xd2, 0xc2, 0x91, 0x0f, 0x17, 0xd5, 0xca, 0x8a, 0x46, 0x07, 0x1b, 0x54, 0x1f, 0x41, 0x0a, 0x55,
+	0xfb, 0x87, 0x2c, 0x78, 0x2c, 0x27, 0xfa, 0x1e, 0x6d, 0xee, 0x1e, 0x33, 0x27, 0x10, 0xd9, 0x18,
+	0x55, 0x73, 0xdc, 0xc8, 0x00, 0x0b, 0x28, 0xfa, 0x24, 0x00, 0x37, 0x12, 0xa0, 0x37, 0xd4, 0x7e,
+	0x61, 0xca, 0x8c, 0x08, 0x4b, 0x5a, 0xb0, 0x1c, 0x59, 0x1f, 0x6b, 0xb4, 0xec, 0x1f, 0x2f, 0xc2,
+	0x30, 0xcf, 0x23, 0xbd, 0x0a, 0xa3, 0x3b, 0x3c, 0x8b, 0xc0, 0x20, 0x09, 0x0b, 0x12, 0xdd, 0x01,
+	0x2f, 0xc0, 0xb2, 0x32, 0x5a, 0x83, 0x53, 0x3c, 0x0b, 0x43, 0xb3, 0x42, 0x9a, 0xce, 0xbe, 0x54,
+	0x74, 0xf1, 0x0c, 0x86, 0x4a, 0xe1, 0x57, 0xed, 0x46, 0xc1, 0x59, 0xf5, 0xd0, 0xeb, 0x30, 0x49,
+	0x2f, 0x1e, 0x41, 0x27, 0x96, 0x94, 0x78, 0xfe, 0x05, 0x75, 0xd3, 0xd9, 0x30, 0xa0, 0x38, 0x85,
+	0x4d, 0xef, 0xbe, 0xed, 0x2e, 0x95, 0xde, 0x70, 0x72, 0xf7, 0x35, 0xd5, 0x78, 0x26, 0x2e, 0xb3,
+	0x9e, 0xec, 0x30, 0x5b, 0xd1, 0x8d, 0x9d, 0x90, 0x44, 0x3b, 0x41, 0xd3, 0x65, 0x82, 0xd6, 0xb0,
+	0x66, 0x3d, 0x99, 0x82, 0xe3, 0xae, 0x1a, 0x94, 0xca, 0x96, 0xe3, 0x35, 0x3b, 0x21, 0x49, 0xa8,
+	0x8c, 0x98, 0x54, 0x56, 0x53, 0x70, 0xdc, 0x55, 0x83, 0xae, 0xa3, 0x33, 0xb5, 0x30, 0xa0, 0xcc,
+	0x4b, 0x86, 0x14, 0x51, 0x26, 0xb1, 0xa3, 0xd2, 0xfb, 0xb1, 0x47, 0xf0, 0x2d, 0x61, 0x34, 0xc8,
+	0x29, 0x18, 0xef, 0xe1, 0x75, 0xe1, 0xf7, 0x28, 0xa9, 0xa0, 0xe7, 0x61, 0x4c, 0xc4, 0xd6, 0x67,
+	0x96, 0x9b, 0x7c, 0xea, 0xd8, 0xfb, 0x7d, 0x25, 0x29, 0xc6, 0x3a, 0x8e, 0xfd, 0x3d, 0x05, 0x38,
+	0x95, 0x61, 0x7a, 0xcf, 0x59, 0xd5, 0xb6, 0x17, 0xc5, 0x2a, 0x4b, 0x9b, 0xc6, 0xaa, 0x78, 0x39,
+	0x56, 0x18, 0x74, 0x3f, 0x70, 0x66, 0x98, 0x66, 0x80, 0xc2, 0xb4, 0x55, 0x40, 0x8f, 0x98, 0xef,
+	0xec, 0x12, 0x0c, 0x75, 0x22, 0x22, 0xc3, 0xe6, 0x29, 0xfe, 0xcd, 0x9e, 0x75, 0x18, 0x84, 0x8a,
+	0xc7, 0xdb, 0xea, 0x85, 0x44, 0x13, 0x8f, 0xf9, 0x1b, 0x09, 0x87, 0xd1, 0xce, 0xc5, 0xc4, 0x77,
+	0xfc, 0x58, 0x08, 0xd1, 0x49, 0xfc, 0x27, 0x56, 0x8a, 0x05, 0xd4, 0xfe, 0x52, 0x11, 0xce, 0xe5,
+	0x3a, 0xe3, 0xd0, 0xae, 0xb7, 0x02, 0xdf, 0x8b, 0x03, 0x65, 0x18, 0xc1, 0x63, 0x3e, 0x91, 0xf6,
+	0xce, 0x9a, 0x28, 0xc7, 0x0a, 0x03, 0x5d, 0x86, 0x61, 0xa6, 0x74, 0xea, 0xca, 0x57, 0xb7, 0x54,
+	0xe1, 0x41, 0x40, 0x38, 0x78, 0xe0, 0x5c, 0xa0, 0x4f, 0xc2, 0x50, 0x3b, 0x08, 0x9a, 0x69, 0xa6,
+	0x45, 0xbb, 0x1b, 0x04, 0x4d, 0xcc, 0x80, 0xe8, 0x43, 0x62, 0xbc, 0x52, 0x96, 0x00, 0xd8, 0x71,
+	0x83, 0x48, 0x1b, 0xb4, 0xa7, 0x61, 0x74, 0x97, 0xec, 0x87, 0x9e, 0xbf, 0x9d, 0xb6, 0x10, 0xb9,
+	0xc1, 0x8b, 0xb1, 0x84, 0x9b, 0xa9, 0x87, 0x46, 0x8f, 0x3b, 0x89, 0x67, 0xa9, 0xef, 0x11, 0xf8,
+	0xbd, 0x45, 0x98, 0xc2, 0x4b, 0x95, 0xf7, 0x27, 0xe2, 0x76, 0xf7, 0x44, 0x1c, 0x77, 0x12, 0xcf,
+	0xfe, 0xb3, 0xf1, 0x73, 0x16, 0x4c, 0xb1, 0x08, 0xff, 0x22, 0x5a, 0x90, 0x17, 0xf8, 0x27, 0x20,
+	0xe2, 0x3d, 0x09, 0xc3, 0x21, 0x6d, 0x34, 0x9d, 0xa8, 0x8e, 0xf5, 0x04, 0x73, 0x18, 0x3a, 0x0f,
+	0x43, 0xac, 0x0b, 0x74, 0xf2, 0xc6, 0x79, 0x8e, 0x9f, 0x8a, 0x13, 0x3b, 0x98, 0x95, 0xb2, 0x10,
+	0x18, 0x98, 0xb4, 0x9b, 0x1e, 0xef, 0x74, 0xf2, 0x24, 0xf8, 0xde, 0x08, 0x81, 0x91, 0xd9, 0xb5,
+	0x77, 0x17, 0x02, 0x23, 0x9b, 0x64, 0xef, 0xeb, 0xd3, 0x1f, 0x16, 0xe0, 0x62, 0x66, 0xbd, 0x81,
+	0x43, 0x60, 0xf4, 0xae, 0xfd, 0x28, 0x23, 0xc1, 0x17, 0x4f, 0xd0, 0xfe, 0x6e, 0x68, 0x50, 0x09,
+	0x73, 0x78, 0x80, 0xc8, 0x14, 0x99, 0x43, 0xf6, 0x1e, 0x89, 0x4c, 0x91, 0xd9, 0xb7, 0x9c, 0xeb,
+	0xdf, 0x9f, 0x17, 0x72, 0xbe, 0x85, 0x5d, 0x04, 0xaf, 0x50, 0x3e, 0xc3, 0x80, 0x91, 0x90, 0x98,
+	0xc7, 0x39, 0x8f, 0xe1, 0x65, 0x58, 0x41, 0xd1, 0x22, 0x4c, 0xb5, 0x3c, 0x9f, 0x32, 0x9f, 0x7d,
+	0x53, 0xf0, 0x53, 0x81, 0x83, 0xd6, 0x4c, 0x30, 0x4e, 0xe3, 0x23, 0x4f, 0x8b, 0x5a, 0x51, 0xc8,
+	0x4f, 0xfd, 0x9c, 0xdb, 0xdb, 0x05, 0xf3, 0xb9, 0x54, 0x8d, 0x62, 0x46, 0x04, 0x8b, 0x35, 0xed,
+	0xfe, 0x5f, 0x1c, 0xfc, 0xfe, 0x3f, 0x9e, 0x7d, 0xf7, 0x9f, 0x7b, 0x0d, 0x26, 0x1e, 0x5a, 0xe1,
+	0x6b, 0x7f, 0xa5, 0x08, 0x8f, 0xf7, 0xd8, 0xf6, 0x9c, 0xd7, 0x1b, 0x73, 0xa0, 0xf1, 0xfa, 0xae,
+	0x79, 0xa8, 0xc1, 0xe9, 0xad, 0x4e, 0xb3, 0xb9, 0xcf, 0x4c, 0xdc, 0x89, 0x2b, 0x31, 0x84, 0x4c,
+	0x79, 0x5e, 0x66, 0x55, 0x5a, 0xcd, 0xc0, 0xc1, 0x99, 0x35, 0xa9, 0x40, 0x4f, 0x4f, 0x92, 0x7d,
+	0x45, 0x2a, 0x25, 0xd0, 0x63, 0x1d, 0x88, 0x4d, 0x5c, 0x74, 0x0d, 0x66, 0x9c, 0x3d, 0xc7, 0xe3,
+	0xa1, 0x3f, 0x25, 0x01, 0x2e, 0xd1, 0x2b, 0x3d, 0xdd, 0x62, 0x1a, 0x01, 0x77, 0xd7, 0x41, 0x6f,
+	0x00, 0x0a, 0x44, 0xea, 0xfa, 0x6b, 0xc4, 0x17, 0xaf, 0x5a, 0x6c, 0xee, 0x8a, 0x09, 0x4b, 0xb8,
+	0xd5, 0x85, 0x81, 0x33, 0x6a, 0xa5, 0xa2, 0x40, 0x8c, 0xe4, 0x47, 0x81, 0xe8, 0xcd, 0x17, 0xfb,
+	0x26, 0x21, 0xf8, 0x2f, 0x16, 0x3d, 0xbe, 0xb8, 0x90, 0x6f, 0x06, 0x33, 0x7b, 0x8d, 0x59, 0x8d,
+	0x71, 0x1d, 0x9e, 0x16, 0x90, 0xe1, 0x8c, 0x66, 0x35, 0x96, 0x00, 0xb1, 0x89, 0xcb, 0x17, 0x44,
+	0x94, 0xf8, 0x01, 0x1a, 0x22, 0xbe, 0x88, 0xb8, 0xa2, 0x30, 0xd0, 0xa7, 0x60, 0xd4, 0xf5, 0xf6,
+	0xbc, 0x28, 0x08, 0xc5, 0x4a, 0x3f, 0xe2, 0x73, 0x41, 0xc2, 0x07, 0x2b, 0x9c, 0x0c, 0x96, 0xf4,
+	0xec, 0xef, 0x2d, 0xc0, 0x84, 0x6c, 0xf1, 0xcd, 0x4e, 0x10, 0x3b, 0x27, 0x70, 0x2c, 0x5f, 0x33,
+	0x8e, 0xe5, 0x0f, 0xf5, 0x0a, 0x3b, 0xc3, 0xba, 0x94, 0x7b, 0x1c, 0xdf, 0x4a, 0x1d, 0xc7, 0x4f,
+	0xf5, 0x27, 0xd5, 0xfb, 0x18, 0xfe, 0x67, 0x16, 0xcc, 0x18, 0xf8, 0x27, 0x70, 0x1a, 0xac, 0x9a,
+	0xa7, 0xc1, 0x13, 0x7d, 0xbf, 0x21, 0xe7, 0x14, 0xf8, 0xce, 0x62, 0xaa, 0xef, 0x8c, 0xfb, 0xbf,
+	0x0d, 0x43, 0x3b, 0x4e, 0xe8, 0xf6, 0x0a, 0xb3, 0xdd, 0x55, 0x69, 0xe1, 0xba, 0x13, 0x8a, 0x67,
+	0xbd, 0x67, 0x55, 0xe6, 0x65, 0x27, 0xec, 0xff, 0xa4, 0xc7, 0x9a, 0x42, 0xaf, 0xc2, 0x48, 0xd4,
+	0x08, 0xda, 0xca, 0x28, 0xfd, 0x12, 0xcf, 0xca, 0x4c, 0x4b, 0x0e, 0x0f, 0xe6, 0x91, 0xd9, 0x1c,
+	0x2d, 0xc6, 0x02, 0x1f, 0x7d, 0x1a, 0x26, 0xd8, 0x2f, 0x65, 0x63, 0x53, 0xcc, 0x4f, 0xc9, 0x53,
+	0xd7, 0x11, 0xb9, 0x01, 0x9a, 0x51, 0x84, 0x4d, 0x52, 0x73, 0xdb, 0x50, 0x56, 0x9f, 0xf5, 0x48,
+	0xdf, 0xe3, 0xfe, 0x43, 0x11, 0x4e, 0x65, 0xac, 0x39, 0x14, 0x19, 0x33, 0xf1, 0xfc, 0x80, 0x4b,
+	0xf5, 0x5d, 0xce, 0x45, 0xc4, 0x6e, 0x43, 0xae, 0x58, 0x5b, 0x03, 0x37, 0x7a, 0x3b, 0x22, 0xe9,
+	0x46, 0x69, 0x51, 0xff, 0x46, 0x69, 0x63, 0x27, 0x36, 0xd4, 0xb4, 0x21, 0xd5, 0xd3, 0x47, 0x3a,
+	0xa7, 0x7f, 0x52, 0x84, 0xd3, 0x59, 0x91, 0xb0, 0xd0, 0xb7, 0xa4, 0xd2, 0xb3, 0xbd, 0x34, 0x68,
+	0x0c, 0x2d, 0x9e, 0xb3, 0x8d, 0xeb, 0x80, 0x97, 0x16, 0xcc, 0x84, 0x6d, 0x7d, 0x87, 0x59, 0xb4,
+	0xc9, 0x7c, 0xdc, 0x43, 0x9e, 0x56, 0x4f, 0xb2, 0x8f, 0x8f, 0x0e, 0xdc, 0x01, 0x91, 0x8f, 0x2f,
+	0x4a, 0xbd, 0xdf, 0xcb, 0xe2, 0xfe, 0xef, 0xf7, 0xb2, 0xe5, 0x39, 0x0f, 0xc6, 0xb4, 0xaf, 0x79,
+	0xa4, 0x33, 0xbe, 0x4b, 0x4f, 0x2b, 0xad, 0xdf, 0x8f, 0x74, 0xd6, 0x7f, 0xc8, 0x82, 0x94, 0xc9,
+	0xb5, 0x52, 0x8b, 0x59, 0xb9, 0x6a, 0xb1, 0x4b, 0x30, 0x14, 0x06, 0x4d, 0x92, 0xce, 0x86, 0x86,
+	0x83, 0x26, 0xc1, 0x0c, 0x42, 0x31, 0xe2, 0x44, 0xd9, 0x31, 0xae, 0x5f, 0xe4, 0xc4, 0x15, 0xed,
+	0x49, 0x18, 0x6e, 0x92, 0x3d, 0xd2, 0x4c, 0x27, 0xad, 0xb8, 0x49, 0x0b, 0x31, 0x87, 0xd9, 0x3f,
+	0x37, 0x04, 0x17, 0x7a, 0x46, 0x89, 0xa0, 0xd7, 0xa1, 0x6d, 0x27, 0x26, 0xf7, 0x9c, 0xfd, 0x74,
+	0x74, 0xf9, 0x6b, 0xbc, 0x18, 0x4b, 0x38, 0x73, 0x8a, 0xe1, 0x41, 0x62, 0x53, 0x4a, 0x44, 0x11,
+	0x1b, 0x56, 0x40, 0x4d, 0xa5, 0x54, 0xf1, 0x38, 0x94, 0x52, 0x2f, 0x00, 0x44, 0x51, 0x93, 0x1b,
+	0xbe, 0xb8, 0xc2, 0xdb, 0x26, 0x09, 0x26, 0x5c, 0xbf, 0x29, 0x20, 0x58, 0xc3, 0x42, 0x15, 0x98,
+	0x6e, 0x87, 0x41, 0xcc, 0x75, 0xb2, 0x15, 0x6e, 0x1b, 0x36, 0x6c, 0x3a, 0xe8, 0xd7, 0x52, 0x70,
+	0xdc, 0x55, 0x03, 0xbd, 0x0c, 0x63, 0xc2, 0x69, 0xbf, 0x16, 0x04, 0x4d, 0xa1, 0x06, 0x52, 0xe6,
+	0x52, 0xf5, 0x04, 0x84, 0x75, 0x3c, 0xad, 0x1a, 0x53, 0xf4, 0x8e, 0x66, 0x56, 0xe3, 0xca, 0x5e,
+	0x0d, 0x2f, 0x15, 0x15, 0xaf, 0x34, 0x50, 0x54, 0xbc, 0x44, 0x31, 0x56, 0x1e, 0xf8, 0x6d, 0x0b,
+	0xfa, 0xaa, 0x92, 0x7e, 0x7a, 0x08, 0x4e, 0x89, 0x85, 0xf3, 0xa8, 0x97, 0xcb, 0xed, 0xee, 0xe5,
+	0x72, 0x1c, 0xaa, 0xb3, 0xf7, 0xd7, 0xcc, 0x49, 0xaf, 0x99, 0xef, 0xb3, 0xc0, 0x14, 0xaf, 0xd0,
+	0xff, 0x9f, 0x9b, 0x9e, 0xe3, 0xe5, 0x5c, 0x71, 0xcd, 0x95, 0x07, 0xc8, 0xbb, 0x4c, 0xd4, 0x61,
+	0xff, 0x27, 0x0b, 0x9e, 0xe8, 0x4b, 0x11, 0xad, 0x40, 0x99, 0xc9, 0x80, 0xda, 0xed, 0xec, 0x29,
+	0x65, 0x3b, 0x2a, 0x01, 0x39, 0x22, 0x69, 0x52, 0x13, 0xad, 0x74, 0xe5, 0x41, 0x79, 0x3a, 0x23,
+	0x0f, 0xca, 0x19, 0x63, 0x78, 0x1e, 0x32, 0x11, 0xca, 0x1f, 0x14, 0x61, 0x84, 0xaf, 0xf8, 0x13,
+	0xb8, 0x86, 0x3d, 0x03, 0x65, 0xaf, 0xd5, 0xea, 0xf0, 0x6c, 0x12, 0xc3, 0xdc, 0xb3, 0x92, 0x0e,
+	0x4d, 0x55, 0x16, 0xe2, 0x04, 0x8e, 0x56, 0x85, 0x92, 0xb7, 0x47, 0x8c, 0x3e, 0xde, 0xf1, 0x85,
+	0x8a, 0x13, 0x3b, 0x5c, 0xa6, 0x50, 0x47, 0x5b, 0xa2, 0x0e, 0x46, 0x9f, 0x05, 0x88, 0xe2, 0xd0,
+	0xf3, 0xb7, 0x69, 0x99, 0x88, 0xde, 0xf8, 0xe1, 0x1e, 0xd4, 0xea, 0x0a, 0x99, 0xd3, 0x4c, 0xb6,
+	0xb9, 0x02, 0x60, 0x8d, 0x22, 0x5a, 0x30, 0x0e, 0xd7, 0xb9, 0x94, 0x96, 0x14, 0x38, 0xd5, 0xe4,
+	0xa8, 0x9d, 0x7b, 0x05, 0xca, 0x8a, 0x78, 0x3f, 0x95, 0xcf, 0xb8, 0x2e, 0x89, 0x7c, 0x02, 0xa6,
+	0x52, 0x7d, 0x3b, 0x92, 0xc6, 0xe8, 0xe7, 0x2d, 0x98, 0xe2, 0x9d, 0x59, 0xf1, 0xf7, 0x04, 0x03,
+	0x7e, 0x07, 0x4e, 0x37, 0x33, 0x18, 0xa1, 0x98, 0xfe, 0xc1, 0x19, 0xa7, 0xd2, 0x10, 0x65, 0x41,
+	0x71, 0x66, 0x1b, 0xe8, 0x0a, 0x5d, 0xe4, 0x94, 0xd1, 0x39, 0x4d, 0xe1, 0x68, 0x39, 0xce, 0x17,
+	0x38, 0x2f, 0xc3, 0x0a, 0x6a, 0xff, 0xb6, 0x05, 0x33, 0xbc, 0xe7, 0x37, 0xc8, 0xbe, 0x62, 0x07,
+	0x5f, 0xcb, 0xbe, 0x8b, 0x3c, 0x46, 0x85, 0x9c, 0x3c, 0x46, 0xfa, 0xa7, 0x15, 0x7b, 0x7e, 0xda,
+	0x4f, 0x59, 0x20, 0x56, 0xc8, 0x09, 0xdc, 0xfb, 0xbf, 0xd1, 0xbc, 0xf7, 0xcf, 0xe5, 0x6f, 0x82,
+	0x9c, 0x0b, 0xff, 0x9f, 0x59, 0x30, 0xcd, 0x11, 0x92, 0x07, 0xea, 0xaf, 0xe9, 0x3c, 0x0c, 0x92,
+	0xed, 0xf4, 0x06, 0xd9, 0xdf, 0x08, 0x6a, 0x4e, 0xbc, 0x93, 0xfd, 0x51, 0xc6, 0x64, 0x0d, 0xf5,
+	0x9c, 0x2c, 0x57, 0x6e, 0xa0, 0x23, 0xa4, 0x50, 0x3e, 0x72, 0x98, 0x7f, 0xfb, 0xab, 0x16, 0x20,
+	0xde, 0x8c, 0x21, 0x2b, 0x51, 0x09, 0x84, 0x95, 0x6a, 0x67, 0x4b, 0xc2, 0x9a, 0x14, 0x04, 0x6b,
+	0x58, 0xc7, 0x32, 0x3c, 0x29, 0x2b, 0x83, 0x62, 0x7f, 0x2b, 0x83, 0x23, 0x8c, 0xe8, 0x1f, 0x0c,
+	0x43, 0xda, 0x5d, 0x04, 0xdd, 0x81, 0xf1, 0x86, 0xd3, 0x76, 0x36, 0xbd, 0xa6, 0x17, 0x7b, 0x24,
+	0xea, 0x65, 0x9e, 0xb4, 0xac, 0xe1, 0x89, 0x77, 0x61, 0xad, 0x04, 0x1b, 0x74, 0xd0, 0x02, 0x40,
+	0x3b, 0xf4, 0xf6, 0xbc, 0x26, 0xd9, 0x66, 0xea, 0x09, 0xe6, 0xda, 0xcd, 0x6d, 0x6e, 0x64, 0x29,
+	0xd6, 0x30, 0x32, 0x7c, 0x67, 0x8b, 0x8f, 0xd8, 0x77, 0x16, 0x4e, 0xcc, 0x77, 0x76, 0xe8, 0x48,
+	0xbe, 0xb3, 0xa5, 0x23, 0xfb, 0xce, 0x0e, 0x0f, 0xe4, 0x3b, 0x8b, 0xe1, 0xac, 0x14, 0xf7, 0xe8,
+	0xff, 0x55, 0xaf, 0x49, 0x84, 0x8c, 0xcf, 0xfd, 0xd1, 0xe7, 0x1e, 0x1c, 0xcc, 0x9f, 0xc5, 0x99,
+	0x18, 0x38, 0xa7, 0x26, 0xfa, 0x24, 0xcc, 0x3a, 0xcd, 0x66, 0x70, 0x4f, 0x4d, 0xea, 0x4a, 0xd4,
+	0x70, 0x9a, 0x5c, 0xef, 0x3f, 0xca, 0xa8, 0x9e, 0x7f, 0x70, 0x30, 0x3f, 0xbb, 0x98, 0x83, 0x83,
+	0x73, 0x6b, 0xa3, 0x8f, 0x43, 0xb9, 0x1d, 0x06, 0x8d, 0x35, 0xcd, 0xa7, 0xed, 0x22, 0x1d, 0xc0,
+	0x9a, 0x2c, 0x3c, 0x3c, 0x98, 0x9f, 0x50, 0x7f, 0xd8, 0x81, 0x9f, 0x54, 0xb0, 0x77, 0xe1, 0x54,
+	0x9d, 0x84, 0x1e, 0x4b, 0x88, 0xec, 0x26, 0xfc, 0x63, 0x03, 0xca, 0x61, 0x8a, 0x63, 0x0e, 0x14,
+	0xd7, 0x4e, 0x8b, 0x87, 0x2e, 0x39, 0x64, 0x42, 0xc8, 0xfe, 0xdf, 0x16, 0x8c, 0x0a, 0xf7, 0x8d,
+	0x13, 0x90, 0xea, 0x16, 0x0d, 0xe5, 0xfa, 0x7c, 0xf6, 0xa9, 0xc2, 0x3a, 0x93, 0xab, 0x56, 0xaf,
+	0xa6, 0xd4, 0xea, 0x4f, 0xf4, 0x22, 0xd2, 0x5b, 0xa1, 0xfe, 0xb7, 0x8a, 0x30, 0x69, 0xfa, 0xf9,
+	0x9d, 0xc0, 0x10, 0xac, 0xc3, 0x68, 0x24, 0x1c, 0xd9, 0x0a, 0xf9, 0xe6, 0xdb, 0xe9, 0x49, 0x4c,
+	0x4c, 0xbb, 0x84, 0xeb, 0x9a, 0x24, 0x92, 0xe9, 0x21, 0x57, 0x7c, 0x84, 0x1e, 0x72, 0xfd, 0x5c,
+	0x2d, 0x87, 0x8e, 0xc3, 0xd5, 0xd2, 0xfe, 0x32, 0x3b, 0xd9, 0xf4, 0xf2, 0x13, 0x10, 0x7a, 0xae,
+	0x99, 0x67, 0xa0, 0xdd, 0x63, 0x65, 0x89, 0x4e, 0xe5, 0x08, 0x3f, 0x3f, 0x6b, 0xc1, 0x85, 0x8c,
+	0xaf, 0xd2, 0x24, 0xa1, 0x67, 0xa1, 0xe4, 0x74, 0x5c, 0x4f, 0xed, 0x65, 0xed, 0x89, 0x6d, 0x51,
+	0x94, 0x63, 0x85, 0x81, 0x96, 0x61, 0x86, 0xdc, 0x6f, 0x7b, 0xfc, 0x75, 0x51, 0xb7, 0xbf, 0x2c,
+	0xf2, 0x58, 0xdf, 0x2b, 0x69, 0x20, 0xee, 0xc6, 0x57, 0xe1, 0x27, 0x8a, 0xb9, 0xe1, 0x27, 0xfe,
+	0xa1, 0x05, 0x63, 0xca, 0x95, 0xeb, 0x91, 0x8f, 0xf6, 0x37, 0x99, 0xa3, 0xfd, 0x78, 0x8f, 0xd1,
+	0xce, 0x19, 0xe6, 0xdf, 0x2a, 0xa8, 0xfe, 0xd6, 0x82, 0x30, 0x1e, 0x40, 0xc2, 0x7a, 0x15, 0x4a,
+	0xed, 0x30, 0x88, 0x83, 0x46, 0xd0, 0x14, 0x02, 0xd6, 0xf9, 0x24, 0x0e, 0x0b, 0x2f, 0x3f, 0xd4,
+	0x7e, 0x63, 0x85, 0x4d, 0x65, 0x1b, 0xa7, 0xdd, 0x96, 0x00, 0x69, 0x96, 0xc5, 0xa2, 0x94, 0x26,
+	0xc5, 0x58, 0xc7, 0x61, 0x03, 0x1e, 0x84, 0xb1, 0x90, 0x83, 0x92, 0x01, 0x0f, 0xc2, 0x18, 0x33,
+	0x08, 0x72, 0x01, 0x62, 0x27, 0xdc, 0x26, 0x31, 0x2d, 0x13, 0xa1, 0xa2, 0xf2, 0xf9, 0x4d, 0x27,
+	0xf6, 0x9a, 0x0b, 0x9e, 0x1f, 0x47, 0x71, 0xb8, 0x50, 0xf5, 0xe3, 0x5b, 0x21, 0xbf, 0xe2, 0x69,
+	0xb1, 0x58, 0x14, 0x2d, 0xac, 0xd1, 0x95, 0x6e, 0xcb, 0xac, 0x8d, 0x61, 0xf3, 0x7d, 0x7f, 0x5d,
+	0x94, 0x63, 0x85, 0x61, 0xbf, 0xc2, 0x4e, 0x1f, 0x36, 0xa6, 0x47, 0x0b, 0x5e, 0xf2, 0x8b, 0x65,
+	0x35, 0x1b, 0xec, 0x71, 0xaf, 0xa2, 0x87, 0x48, 0xe9, 0xcd, 0xec, 0x69, 0xc3, 0xba, 0x0b, 0x53,
+	0x12, 0x47, 0x05, 0x7d, 0x73, 0x97, 0xcd, 0xc6, 0x73, 0x7d, 0x4e, 0x8d, 0x23, 0x58, 0x69, 0xb0,
+	0x94, 0x05, 0x2c, 0xa0, 0x7b, 0xb5, 0x26, 0xf6, 0x85, 0x96, 0xb2, 0x40, 0x00, 0x70, 0x82, 0x83,
+	0xae, 0x8a, 0x0b, 0x3c, 0x57, 0x7d, 0x3f, 0x9e, 0xba, 0xc0, 0xcb, 0xcf, 0xd7, 0x94, 0xe5, 0xcf,
+	0xc3, 0x98, 0x4a, 0xd8, 0x59, 0xe3, 0x79, 0x20, 0xc5, 0xb2, 0x59, 0x49, 0x8a, 0xb1, 0x8e, 0x83,
+	0x36, 0x60, 0x2a, 0xe2, 0xaa, 0x24, 0x15, 0x1f, 0x95, 0xab, 0xe4, 0x3e, 0x2c, 0x0d, 0x5d, 0xea,
+	0x26, 0xf8, 0x90, 0x15, 0x71, 0x6e, 0x23, 0x5d, 0x85, 0xd3, 0x24, 0xd0, 0xeb, 0x30, 0xd9, 0x0c,
+	0x1c, 0x77, 0xc9, 0x69, 0x3a, 0x7e, 0x83, 0x7d, 0x6f, 0xc9, 0xcc, 0xfb, 0x76, 0xd3, 0x80, 0xe2,
+	0x14, 0x36, 0x15, 0x96, 0xf4, 0x12, 0x11, 0xd3, 0xd7, 0xf1, 0xb7, 0x49, 0x24, 0xd2, 0x2f, 0x32,
+	0x61, 0xe9, 0x66, 0x0e, 0x0e, 0xce, 0xad, 0x8d, 0x5e, 0x85, 0x71, 0xf9, 0xf9, 0x9a, 0x67, 0x7d,
+	0x62, 0xdb, 0xaf, 0xc1, 0xb0, 0x81, 0x89, 0xee, 0xc1, 0x19, 0xf9, 0x7f, 0x23, 0x74, 0xb6, 0xb6,
+	0xbc, 0x86, 0x70, 0x37, 0xe5, 0x8e, 0x77, 0x8b, 0xd2, 0x3b, 0x6c, 0x25, 0x0b, 0xe9, 0xf0, 0x60,
+	0xfe, 0x92, 0x18, 0xb5, 0x4c, 0x38, 0x9b, 0xc4, 0x6c, 0xfa, 0x68, 0x0d, 0x4e, 0xed, 0x10, 0xa7,
+	0x19, 0xef, 0x2c, 0xef, 0x90, 0xc6, 0xae, 0xdc, 0x44, 0xcc, 0x5f, 0x5f, 0xb3, 0x88, 0xbf, 0xde,
+	0x8d, 0x82, 0xb3, 0xea, 0xa1, 0xb7, 0x60, 0xb6, 0xdd, 0xd9, 0x6c, 0x7a, 0xd1, 0xce, 0x7a, 0x10,
+	0x33, 0x6b, 0x17, 0x95, 0xff, 0x53, 0x38, 0xf6, 0xab, 0x88, 0x08, 0xb5, 0x1c, 0x3c, 0x9c, 0x4b,
+	0x01, 0xbd, 0x03, 0x67, 0x52, 0x8b, 0x41, 0xb8, 0x36, 0x4f, 0xe6, 0x47, 0x48, 0xaf, 0x67, 0x55,
+	0x10, 0x51, 0x02, 0xb2, 0x40, 0x38, 0xbb, 0x09, 0xf4, 0x12, 0x94, 0xbc, 0xf6, 0xaa, 0xd3, 0xf2,
+	0x9a, 0xfb, 0x2c, 0xc4, 0x7b, 0x99, 0x85, 0x3d, 0x2f, 0x55, 0x6b, 0xbc, 0xec, 0x50, 0xfb, 0x8d,
+	0x15, 0x26, 0xbd, 0x22, 0x68, 0x81, 0x2c, 0xa3, 0xd9, 0xe9, 0xc4, 0x98, 0x57, 0x8b, 0x76, 0x19,
+	0x61, 0x03, 0xeb, 0xdd, 0xd9, 0x48, 0xbd, 0x4d, 0x2b, 0x6b, 0x32, 0x23, 0xfa, 0x1c, 0x8c, 0xeb,
+	0x2b, 0x56, 0x9c, 0x7f, 0x97, 0xb3, 0x45, 0x2a, 0x6d, 0x65, 0x73, 0x89, 0x53, 0xad, 0x5e, 0x1d,
+	0x86, 0x0d, 0x8a, 0x36, 0x81, 0xec, 0xb1, 0x44, 0x37, 0xa1, 0xd4, 0x68, 0x7a, 0xc4, 0x8f, 0xab,
+	0xb5, 0x5e, 0x31, 0x98, 0x96, 0x05, 0x8e, 0x98, 0x1c, 0x11, 0xbe, 0x9a, 0x97, 0x61, 0x45, 0xc1,
+	0xfe, 0x95, 0x02, 0xcc, 0xf7, 0x89, 0x85, 0x9e, 0x52, 0xe5, 0x5b, 0x03, 0xa9, 0xf2, 0x17, 0x65,
+	0xe6, 0xd4, 0xf5, 0x94, 0xca, 0x22, 0x95, 0x15, 0x35, 0x51, 0x5c, 0xa4, 0xf1, 0x07, 0x36, 0xad,
+	0xd6, 0x5f, 0x03, 0x86, 0xfa, 0x3a, 0x07, 0x18, 0xaf, 0x80, 0xc3, 0x83, 0xdf, 0x93, 0x72, 0x5f,
+	0x74, 0xec, 0x2f, 0x17, 0xe0, 0x8c, 0x1a, 0xc2, 0xaf, 0xdf, 0x81, 0xbb, 0xdd, 0x3d, 0x70, 0xc7,
+	0xf0, 0x1e, 0x66, 0xdf, 0x82, 0x11, 0x1e, 0x54, 0x6a, 0x00, 0xf9, 0xec, 0x49, 0x33, 0xfe, 0xa2,
+	0x12, 0x09, 0x8c, 0x18, 0x8c, 0xdf, 0x6d, 0xc1, 0xd4, 0xc6, 0x72, 0xad, 0x1e, 0x34, 0x76, 0x49,
+	0xbc, 0xc8, 0xe5, 0x69, 0x2c, 0x64, 0x2d, 0xeb, 0x21, 0x65, 0xa8, 0x2c, 0xe9, 0xec, 0x12, 0x0c,
+	0xed, 0x04, 0x51, 0x9c, 0x7e, 0x2c, 0xbf, 0x1e, 0x44, 0x31, 0x66, 0x10, 0xfb, 0x77, 0x2c, 0x18,
+	0x66, 0xb9, 0xc2, 0xfb, 0x65, 0xab, 0x1f, 0xe4, 0xbb, 0xd0, 0xcb, 0x30, 0x42, 0xb6, 0xb6, 0x48,
+	0x23, 0x16, 0xb3, 0x2a, 0xbd, 0x9b, 0x47, 0x56, 0x58, 0x29, 0x15, 0x30, 0x58, 0x63, 0xfc, 0x2f,
+	0x16, 0xc8, 0xe8, 0x2e, 0x94, 0x63, 0xaf, 0x45, 0x16, 0x5d, 0x57, 0x3c, 0x37, 0x3e, 0x84, 0x87,
+	0xf6, 0x86, 0x24, 0x80, 0x13, 0x5a, 0xf6, 0x97, 0x0a, 0x00, 0x49, 0xb4, 0x8f, 0x7e, 0x9f, 0xb8,
+	0xd4, 0xf5, 0x10, 0x75, 0x39, 0xe3, 0x21, 0x0a, 0x25, 0x04, 0x33, 0x5e, 0xa1, 0xd4, 0x30, 0x15,
+	0x07, 0x1a, 0xa6, 0xa1, 0xa3, 0x0c, 0xd3, 0x32, 0xcc, 0x24, 0xd1, 0x4a, 0xcc, 0x60, 0x4d, 0xec,
+	0x0e, 0xb5, 0x91, 0x06, 0xe2, 0x6e, 0x7c, 0x9b, 0xc0, 0x25, 0x15, 0xb4, 0x41, 0x9c, 0x35, 0xcc,
+	0x9a, 0x55, 0x7f, 0xd8, 0xeb, 0x33, 0x4e, 0xc9, 0x4b, 0x5b, 0x21, 0xf7, 0xa5, 0xed, 0x47, 0x2d,
+	0x38, 0x9d, 0x6e, 0x87, 0xb9, 0x17, 0x7e, 0xd1, 0x82, 0x33, 0xec, 0xbd, 0x91, 0xb5, 0xda, 0xfd,
+	0xba, 0xf9, 0x52, 0xcf, 0x40, 0x14, 0x39, 0x3d, 0x4e, 0xdc, 0xe8, 0xd7, 0xb2, 0x48, 0xe3, 0xec,
+	0x16, 0xed, 0xff, 0x58, 0x80, 0xd9, 0xbc, 0x08, 0x16, 0xcc, 0xd8, 0xdd, 0xb9, 0x5f, 0xdf, 0x25,
+	0xf7, 0x84, 0x49, 0x71, 0x62, 0xec, 0xce, 0x8b, 0xb1, 0x84, 0xa7, 0xc3, 0x5b, 0x17, 0x06, 0x0b,
+	0x6f, 0x8d, 0x76, 0x60, 0xe6, 0xde, 0x0e, 0xf1, 0x6f, 0xfb, 0x91, 0x13, 0x7b, 0xd1, 0x96, 0xc7,
+	0x1e, 0x0a, 0xf9, 0xba, 0xf9, 0x98, 0x34, 0xfc, 0xbd, 0x9b, 0x46, 0x38, 0x3c, 0x98, 0xbf, 0x60,
+	0x14, 0x24, 0x5d, 0xe6, 0x8c, 0x04, 0x77, 0x13, 0xed, 0x8e, 0x0e, 0x3e, 0xf4, 0x08, 0xa3, 0x83,
+	0xdb, 0x5f, 0xb4, 0xe0, 0x5c, 0x6e, 0xf6, 0x3e, 0x74, 0x05, 0x4a, 0x4e, 0xdb, 0xe3, 0xba, 0x56,
+	0xc1, 0x46, 0x99, 0xce, 0xa0, 0x56, 0xe5, 0x9a, 0x56, 0x05, 0x55, 0x59, 0x85, 0x0b, 0xb9, 0x59,
+	0x85, 0xfb, 0x26, 0x09, 0xb6, 0xbf, 0xcb, 0x02, 0xe1, 0xa8, 0x37, 0x00, 0xef, 0xfe, 0xb4, 0x4c,
+	0xca, 0x6e, 0x64, 0x10, 0xb9, 0x94, 0xef, 0xb9, 0x28, 0xf2, 0x86, 0x28, 0x59, 0xc9, 0xc8, 0x16,
+	0x62, 0xd0, 0xb2, 0x5d, 0x10, 0xd0, 0x0a, 0x61, 0x9a, 0xca, 0xfe, 0xbd, 0x79, 0x01, 0xc0, 0x65,
+	0xb8, 0x5a, 0x6a, 0x66, 0x75, 0x32, 0x57, 0x14, 0x04, 0x6b, 0x58, 0xf6, 0xbf, 0x2b, 0xc0, 0x98,
+	0xcc, 0x58, 0xd1, 0xf1, 0x07, 0xd1, 0x27, 0x1c, 0x29, 0x85, 0x1d, 0xcb, 0x65, 0x4e, 0x09, 0xd7,
+	0x12, 0x35, 0x4c, 0x92, 0xcb, 0x5c, 0x02, 0x70, 0x82, 0x43, 0x77, 0x51, 0xd4, 0xd9, 0x64, 0xe8,
+	0x29, 0xb7, 0xb2, 0x3a, 0x2f, 0xc6, 0x12, 0x8e, 0x3e, 0x09, 0xd3, 0xbc, 0x5e, 0x18, 0xb4, 0x9d,
+	0x6d, 0xae, 0xc4, 0x1e, 0x56, 0xfe, 0xe0, 0xd3, 0x6b, 0x29, 0xd8, 0xe1, 0xc1, 0xfc, 0xe9, 0x74,
+	0x19, 0x7b, 0x9d, 0xe9, 0xa2, 0xc2, 0xcc, 0x43, 0x78, 0x23, 0x74, 0xf7, 0x77, 0x59, 0x95, 0x24,
+	0x20, 0xac, 0xe3, 0xd9, 0x9f, 0x03, 0xd4, 0x9d, 0xbb, 0x03, 0xbd, 0xc1, 0x6d, 0x02, 0xbd, 0x90,
+	0xb8, 0xbd, 0x5e, 0x6b, 0x74, 0xaf, 0x67, 0xe9, 0x11, 0xc2, 0x6b, 0x61, 0x55, 0xdf, 0xfe, 0xab,
+	0x45, 0x98, 0x4e, 0xfb, 0xc0, 0xa2, 0xeb, 0x30, 0xc2, 0x45, 0x0f, 0x41, 0xbe, 0x87, 0x31, 0x80,
+	0xe6, 0x39, 0xcb, 0x98, 0xb0, 0x90, 0x5e, 0x44, 0x7d, 0xf4, 0x16, 0x8c, 0xb9, 0xc1, 0x3d, 0xff,
+	0x9e, 0x13, 0xba, 0x8b, 0xb5, 0xaa, 0x58, 0xce, 0x99, 0xb7, 0xa5, 0x4a, 0x82, 0xa6, 0x7b, 0xe3,
+	0xb2, 0x87, 0xaf, 0x04, 0x84, 0x75, 0x72, 0x68, 0x83, 0x85, 0x1a, 0xde, 0xf2, 0xb6, 0xd7, 0x9c,
+	0x76, 0x2f, 0x03, 0xf1, 0x65, 0x89, 0xa4, 0x51, 0x9e, 0x10, 0xf1, 0x88, 0x39, 0x00, 0x27, 0x84,
+	0xd0, 0xb7, 0xc0, 0xa9, 0x28, 0x47, 0x27, 0x9b, 0x97, 0xca, 0xa9, 0x97, 0x9a, 0x72, 0xe9, 0x31,
+	0x7a, 0x8f, 0xcd, 0xd2, 0xde, 0x66, 0x35, 0x63, 0xff, 0xea, 0x29, 0x30, 0x36, 0xb1, 0x91, 0xd9,
+	0xcf, 0x3a, 0xa6, 0xcc, 0x7e, 0x18, 0x4a, 0xa4, 0xd5, 0x8e, 0xf7, 0x2b, 0x5e, 0xd8, 0x2b, 0x35,
+	0xec, 0x8a, 0xc0, 0xe9, 0xa6, 0x29, 0x21, 0x58, 0xd1, 0xc9, 0x4e, 0xbf, 0x58, 0xfc, 0x1a, 0xa6,
+	0x5f, 0x1c, 0x3a, 0xc1, 0xf4, 0x8b, 0xeb, 0x30, 0xba, 0xed, 0xc5, 0x98, 0xb4, 0x03, 0x21, 0xf4,
+	0x67, 0xae, 0xc3, 0x6b, 0x1c, 0xa5, 0x3b, 0xd1, 0x97, 0x00, 0x60, 0x49, 0x04, 0xbd, 0xa1, 0x76,
+	0xe0, 0x48, 0xfe, 0x9d, 0xb9, 0xfb, 0xd5, 0x3a, 0x73, 0x0f, 0x8a, 0x24, 0x8b, 0xa3, 0x0f, 0x9b,
+	0x64, 0x71, 0x55, 0xa6, 0x46, 0x2c, 0xe5, 0x7b, 0x73, 0xb0, 0xcc, 0x87, 0x7d, 0x12, 0x22, 0xde,
+	0xd1, 0xd3, 0x49, 0x96, 0xf3, 0x39, 0x81, 0xca, 0x14, 0x39, 0x60, 0x12, 0xc9, 0xef, 0xb2, 0xe0,
+	0x4c, 0x3b, 0x2b, 0xb3, 0xaa, 0x78, 0xe0, 0x7d, 0x79, 0xe0, 0xd4, 0xb1, 0x46, 0x83, 0x4c, 0x51,
+	0x93, 0x89, 0x86, 0xb3, 0x9b, 0xa3, 0x03, 0x1d, 0x6e, 0xba, 0x22, 0x0b, 0xe2, 0x93, 0x39, 0xd9,
+	0x28, 0x7b, 0xe4, 0xa0, 0xdc, 0xc8, 0xc8, 0x7c, 0xf8, 0xc1, 0xbc, 0xcc, 0x87, 0x03, 0xe7, 0x3b,
+	0x7c, 0x43, 0xe5, 0xa1, 0x9c, 0xc8, 0x5f, 0x4a, 0x3c, 0xcb, 0x64, 0xdf, 0xec, 0x93, 0x6f, 0xa8,
+	0xec, 0x93, 0x3d, 0xe2, 0x54, 0xf2, 0xdc, 0x92, 0x7d, 0x73, 0x4e, 0x6a, 0x79, 0x23, 0xa7, 0x8e,
+	0x27, 0x6f, 0xa4, 0x71, 0xd4, 0xf0, 0xd4, 0x85, 0xcf, 0xf4, 0x39, 0x6a, 0x0c, 0xba, 0xbd, 0x0f,
+	0x1b, 0x9e, 0x23, 0x73, 0xe6, 0xa1, 0x72, 0x64, 0xde, 0xd1, 0x73, 0x4e, 0xa2, 0x3e, 0x49, 0x15,
+	0x29, 0xd2, 0x80, 0x99, 0x26, 0xef, 0xe8, 0x07, 0xe0, 0xa9, 0x7c, 0xba, 0xea, 0x9c, 0xeb, 0xa6,
+	0x9b, 0x79, 0x04, 0x76, 0x65, 0xb0, 0x3c, 0x7d, 0x32, 0x19, 0x2c, 0xcf, 0x1c, 0x7b, 0x06, 0xcb,
+	0xb3, 0x27, 0x90, 0xc1, 0xf2, 0xb1, 0x13, 0xcc, 0x60, 0x79, 0x87, 0x59, 0x45, 0xf0, 0x70, 0x27,
+	0x22, 0xae, 0x66, 0x76, 0x0c, 0xc7, 0xac, 0x98, 0x28, 0xfc, 0xe3, 0x14, 0x08, 0x27, 0xa4, 0x32,
+	0x32, 0x63, 0xce, 0x3e, 0x82, 0xcc, 0x98, 0xeb, 0x49, 0x66, 0xcc, 0x73, 0xf9, 0x53, 0x9d, 0x61,
+	0xba, 0x9e, 0x93, 0x0f, 0xf3, 0x8e, 0x9e, 0xc7, 0xf2, 0xf1, 0x1e, 0xaa, 0xf8, 0x2c, 0xc5, 0x63,
+	0x8f, 0xec, 0x95, 0xaf, 0xf3, 0xec, 0x95, 0xe7, 0xf3, 0x39, 0x79, 0xfa, 0xb8, 0x33, 0x73, 0x56,
+	0x7e, 0x4f, 0x01, 0x2e, 0xf6, 0xde, 0x17, 0x89, 0xd6, 0xb3, 0x96, 0xbc, 0x08, 0xa6, 0xb4, 0x9e,
+	0xfc, 0x6e, 0x95, 0x60, 0x0d, 0x1c, 0x09, 0xeb, 0x1a, 0xcc, 0x28, 0xdb, 0xf4, 0xa6, 0xd7, 0xd8,
+	0xd7, 0xd2, 0xf4, 0x2b, 0x7f, 0xde, 0x7a, 0x1a, 0x01, 0x77, 0xd7, 0x41, 0x8b, 0x30, 0x65, 0x14,
+	0x56, 0x2b, 0xe2, 0x0e, 0xa5, 0xd4, 0xac, 0x75, 0x13, 0x8c, 0xd3, 0xf8, 0xf6, 0x4f, 0x5a, 0xf0,
+	0x58, 0x4e, 0x72, 0xa8, 0x81, 0x03, 0x3d, 0x6d, 0xc1, 0x54, 0xdb, 0xac, 0xda, 0x27, 0x1e, 0x9c,
+	0x91, 0x82, 0x4a, 0xf5, 0x35, 0x05, 0xc0, 0x69, 0xa2, 0xf6, 0x9f, 0x5a, 0x70, 0xa1, 0xa7, 0xe5,
+	0x17, 0xc2, 0x70, 0x76, 0xbb, 0x15, 0x39, 0xcb, 0x21, 0x71, 0x89, 0x1f, 0x7b, 0x4e, 0xb3, 0xde,
+	0x26, 0x0d, 0x4d, 0x6f, 0xcd, 0x4c, 0xa8, 0xae, 0xad, 0xd5, 0x17, 0xbb, 0x31, 0x70, 0x4e, 0x4d,
+	0xb4, 0x0a, 0xa8, 0x1b, 0x22, 0x66, 0x98, 0xc5, 0x8c, 0xed, 0xa6, 0x87, 0x33, 0x6a, 0xa0, 0x57,
+	0x60, 0x42, 0x59, 0x94, 0x69, 0x33, 0xce, 0x18, 0x30, 0xd6, 0x01, 0xd8, 0xc4, 0x5b, 0xba, 0xf2,
+	0xeb, 0xbf, 0x77, 0xf1, 0x03, 0xbf, 0xf9, 0x7b, 0x17, 0x3f, 0xf0, 0xdb, 0xbf, 0x77, 0xf1, 0x03,
+	0xdf, 0xf6, 0xe0, 0xa2, 0xf5, 0xeb, 0x0f, 0x2e, 0x5a, 0xbf, 0xf9, 0xe0, 0xa2, 0xf5, 0xdb, 0x0f,
+	0x2e, 0x5a, 0xbf, 0xfb, 0xe0, 0xa2, 0xf5, 0xa5, 0xdf, 0xbf, 0xf8, 0x81, 0x4f, 0x17, 0xf6, 0x9e,
+	0xff, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x5c, 0x2b, 0xc8, 0x61, 0xd8, 0xfd, 0x00, 0x00,
 }
 
 func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) {
@@ -7889,6 +7895,16 @@ func (m *ConfigMap) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	_ = i
 	var l int
 	_ = l
+	if m.Immutable != nil {
+		i--
+		if *m.Immutable {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x20
+	}
 	if len(m.BinaryData) > 0 {
 		keysForBinaryData := make([]string, 0, len(m.BinaryData))
 		for k := range m.BinaryData {
@@ -9132,6 +9148,13 @@ func (m *EndpointPort) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	_ = i
 	var l int
 	_ = l
+	if m.AppProtocol != nil {
+		i -= len(*m.AppProtocol)
+		copy(dAtA[i:], *m.AppProtocol)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.AppProtocol)))
+		i--
+		dAtA[i] = 0x22
+	}
 	i -= len(m.Protocol)
 	copy(dAtA[i:], m.Protocol)
 	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol)))
@@ -14500,6 +14523,13 @@ func (m *PodSecurityContext) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	_ = i
 	var l int
 	_ = l
+	if m.FSGroupChangePolicy != nil {
+		i -= len(*m.FSGroupChangePolicy)
+		copy(dAtA[i:], *m.FSGroupChangePolicy)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FSGroupChangePolicy)))
+		i--
+		dAtA[i] = 0x4a
+	}
 	if m.WindowsOptions != nil {
 		{
 			size, err := m.WindowsOptions.MarshalToSizedBuffer(dAtA[:i])
@@ -16797,6 +16827,16 @@ func (m *Secret) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	_ = i
 	var l int
 	_ = l
+	if m.Immutable != nil {
+		i--
+		if *m.Immutable {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x28
+	}
 	if len(m.StringData) > 0 {
 		keysForStringData := make([]string, 0, len(m.StringData))
 		for k := range m.StringData {
@@ -17575,6 +17615,13 @@ func (m *ServicePort) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	_ = i
 	var l int
 	_ = l
+	if m.AppProtocol != nil {
+		i -= len(*m.AppProtocol)
+		copy(dAtA[i:], *m.AppProtocol)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.AppProtocol)))
+		i--
+		dAtA[i] = 0x32
+	}
 	i = encodeVarintGenerated(dAtA, i, uint64(m.NodePort))
 	i--
 	dAtA[i] = 0x28
@@ -19458,6 +19505,9 @@ func (m *ConfigMap) Size() (n int) {
 			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
 		}
 	}
+	if m.Immutable != nil {
+		n += 2
+	}
 	return n
 }
 
@@ -19895,6 +19945,10 @@ func (m *EndpointPort) Size() (n int) {
 	n += 1 + sovGenerated(uint64(m.Port))
 	l = len(m.Protocol)
 	n += 1 + l + sovGenerated(uint64(l))
+	if m.AppProtocol != nil {
+		l = len(*m.AppProtocol)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
 	return n
 }
 
@@ -21877,6 +21931,10 @@ func (m *PodSecurityContext) Size() (n int) {
 		l = m.WindowsOptions.Size()
 		n += 1 + l + sovGenerated(uint64(l))
 	}
+	if m.FSGroupChangePolicy != nil {
+		l = len(*m.FSGroupChangePolicy)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
 	return n
 }
 
@@ -22696,6 +22754,9 @@ func (m *Secret) Size() (n int) {
 			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
 		}
 	}
+	if m.Immutable != nil {
+		n += 2
+	}
 	return n
 }
 
@@ -22961,6 +23022,10 @@ func (m *ServicePort) Size() (n int) {
 	l = m.TargetPort.Size()
 	n += 1 + l + sovGenerated(uint64(l))
 	n += 1 + sovGenerated(uint64(m.NodePort))
+	if m.AppProtocol != nil {
+		l = len(*m.AppProtocol)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
 	return n
 }
 
@@ -23801,6 +23866,7 @@ func (this *ConfigMap) String() string {
 		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
 		`Data:` + mapStringForData + `,`,
 		`BinaryData:` + mapStringForBinaryData + `,`,
+		`Immutable:` + valueToStringGenerated(this.Immutable) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -24127,6 +24193,7 @@ func (this *EndpointPort) String() string {
 		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
 		`Port:` + fmt.Sprintf("%v", this.Port) + `,`,
 		`Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`,
+		`AppProtocol:` + valueToStringGenerated(this.AppProtocol) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -25629,6 +25696,7 @@ func (this *PodSecurityContext) String() string {
 		`RunAsGroup:` + valueToStringGenerated(this.RunAsGroup) + `,`,
 		`Sysctls:` + repeatedStringForSysctls + `,`,
 		`WindowsOptions:` + strings.Replace(this.WindowsOptions.String(), "WindowsSecurityContextOptions", "WindowsSecurityContextOptions", 1) + `,`,
+		`FSGroupChangePolicy:` + valueToStringGenerated(this.FSGroupChangePolicy) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -26301,6 +26369,7 @@ func (this *Secret) String() string {
 		`Data:` + mapStringForData + `,`,
 		`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
 		`StringData:` + mapStringForStringData + `,`,
+		`Immutable:` + valueToStringGenerated(this.Immutable) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -26508,6 +26577,7 @@ func (this *ServicePort) String() string {
 		`Port:` + fmt.Sprintf("%v", this.Port) + `,`,
 		`TargetPort:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TargetPort), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`,
 		`NodePort:` + fmt.Sprintf("%v", this.NodePort) + `,`,
+		`AppProtocol:` + valueToStringGenerated(this.AppProtocol) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -30524,6 +30594,27 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error {
 			}
 			m.BinaryData[mapkey] = mapvalue
 			iNdEx = postIndex
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Immutable", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.Immutable = &b
 		default:
 			iNdEx = preIndex
 			skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -34258,6 +34349,39 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error {
 			}
 			m.Protocol = Protocol(dAtA[iNdEx:postIndex])
 			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AppProtocol", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.AppProtocol = &s
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -51715,6 +51839,39 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error {
 				return err
 			}
 			iNdEx = postIndex
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FSGroupChangePolicy", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := PodFSGroupChangePolicy(dAtA[iNdEx:postIndex])
+			m.FSGroupChangePolicy = &s
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -59523,6 +59680,27 @@ func (m *Secret) Unmarshal(dAtA []byte) error {
 			}
 			m.StringData[mapkey] = mapvalue
 			iNdEx = postIndex
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Immutable", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.Immutable = &b
 		default:
 			iNdEx = preIndex
 			skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -61603,6 +61781,39 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error {
 					break
 				}
 			}
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AppProtocol", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.AppProtocol = &s
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -66311,6 +66522,7 @@ func (m *WindowsSecurityContextOptions) Unmarshal(dAtA []byte) error {
 func skipGenerated(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
+	depth := 0
 	for iNdEx < l {
 		var wire uint64
 		for shift := uint(0); ; shift += 7 {
@@ -66342,10 +66554,8 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			return iNdEx, nil
 		case 1:
 			iNdEx += 8
-			return iNdEx, nil
 		case 2:
 			var length int
 			for shift := uint(0); ; shift += 7 {
@@ -66366,55 +66576,30 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 				return 0, ErrInvalidLengthGenerated
 			}
 			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthGenerated
-			}
-			return iNdEx, nil
 		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowGenerated
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipGenerated(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthGenerated
-				}
-			}
-			return iNdEx, nil
+			depth++
 		case 4:
-			return iNdEx, nil
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
 		case 5:
 			iNdEx += 4
-			return iNdEx, nil
 		default:
 			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
 		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
 	}
-	panic("unreachable")
+	return 0, io.ErrUnexpectedEOF
 }
 
 var (
-	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
 )
diff --git a/vendor/k8s.io/api/core/v1/generated.proto b/vendor/k8s.io/api/core/v1/generated.proto
index c05e2351..d1cd8ebb 100644
--- a/vendor/k8s.io/api/core/v1/generated.proto
+++ b/vendor/k8s.io/api/core/v1/generated.proto
@@ -455,6 +455,14 @@ message ConfigMap {
   // +optional
   optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
 
+  // Immutable, if set to true, ensures that data stored in the ConfigMap cannot
+  // be updated (only object metadata can be modified).
+  // If not set to true, the field can be modified at any time.
+  // Defaulted to nil.
+  // This is an alpha field enabled by ImmutableEphemeralVolumes feature gate.
+  // +optional
+  optional bool immutable = 4;
+
   // Data contains the configuration data.
   // Each key must consist of alphanumeric characters, '-', '_' or '.'.
   // Values with non-UTF-8 byte sequences must use the BinaryData field.
@@ -681,7 +689,6 @@ message Container {
   repeated VolumeMount volumeMounts = 9;
 
   // volumeDevices is the list of block devices to be used by the container.
-  // This is a beta feature.
   // +patchMergeKey=devicePath
   // +patchStrategy=merge
   // +optional
@@ -707,7 +714,7 @@ message Container {
   // This can be used to provide different probe parameters at the beginning of a Pod's lifecycle,
   // when it might take a long time to load data or warm a cache, than during steady-state operation.
   // This cannot be updated.
-  // This is an alpha feature enabled by the StartupProbe feature flag.
+  // This is a beta feature enabled by the StartupProbe feature flag.
   // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
   // +optional
   optional Probe startupProbe = 22;
@@ -1034,6 +1041,16 @@ message EndpointPort {
   // Default is TCP.
   // +optional
   optional string protocol = 3;
+
+  // The application protocol for this port.
+  // This field follows standard Kubernetes label syntax.
+  // Un-prefixed names are reserved for IANA standard service names (as per
+  // RFC-6335 and http://www.iana.org/assignments/service-names).
+  // Non-standard protocols should use prefixed names such as
+  // mycompany.com/my-custom-protocol.
+  // This field can be enabled with the ServiceAppProtocol feature gate.
+  // +optional
+  optional string appProtocol = 4;
 }
 
 // EndpointSubset is a group of addresses with a common set of ports. The
@@ -1258,7 +1275,6 @@ message EphemeralContainerCommon {
   repeated VolumeMount volumeMounts = 9;
 
   // volumeDevices is the list of block devices to be used by the container.
-  // This is a beta feature.
   // +patchMergeKey=devicePath
   // +patchStrategy=merge
   // +optional
@@ -1913,7 +1929,6 @@ message LimitRange {
 // LimitRangeItem defines a min/max usage limit for any resource that matches on kind.
 message LimitRangeItem {
   // Type of resource that this limit applies to.
-  // +optional
   optional string type = 1;
 
   // Max usage constraints on this kind by resource name.
@@ -2455,6 +2470,20 @@ message ObjectFieldSelector {
 }
 
 // ObjectReference contains enough information to let you inspect or modify the referred object.
+// ---
+// New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs.
+//  1. Ignored fields.  It includes many fields which are not generally honored.  For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage.
+//  2. Invalid usage help.  It is impossible to add specific help for individual usage.  In most embedded usages, there are particular
+//     restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted".
+//     Those cannot be well described when embedded.
+//  3. Inconsistent validation.  Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen.
+//  4. The fields are both imprecise and overly precise.  Kind is not a precise mapping to a URL. This can produce ambiguity
+//     during interpretation and require a REST mapping.  In most cases, the dependency is on the group,resource tuple
+//     and the version of the actual struct is irrelevant.
+//  5. We cannot easily change it.  Because this type is embedded in many locations, updates to this type
+//     will affect numerous schemas.  Don't make new APIs embed an underspecified API type they do not control.
+// Instead of using this type, create a locally provided and used type that is well-focused on your reference.
+// For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 .
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 message ObjectReference {
   // Kind of the referent.
@@ -2605,15 +2634,18 @@ message PersistentVolumeClaimSpec {
 
   // volumeMode defines what type of volume is required by the claim.
   // Value of Filesystem is implied when not included in claim spec.
-  // This is a beta feature.
   // +optional
   optional string volumeMode = 6;
 
-  // This field requires the VolumeSnapshotDataSource alpha feature gate to be
-  // enabled and currently VolumeSnapshot is the only supported data source.
-  // If the provisioner can support VolumeSnapshot data source, it will create
-  // a new volume and data will be restored to the volume at the same time.
-  // If the provisioner does not support VolumeSnapshot data source, volume will
+  // This field can be used to specify either:
+  // * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot - Beta)
+  // * An existing PVC (PersistentVolumeClaim)
+  // * An existing custom resource/object that implements data population (Alpha)
+  // In order to use VolumeSnapshot object types, the appropriate feature gate
+  // must be enabled (VolumeSnapshotDataSource or AnyVolumeDataSource)
+  // If the provisioner or an external controller can support the specified data source,
+  // it will create a new volume based on the contents of the specified data source.
+  // If the specified data source is not supported, the volume will
   // not be created and the failure will be reported as an event.
   // In the future, we plan to support more data source types and the behavior
   // of the provisioner may change.
@@ -2821,7 +2853,6 @@ message PersistentVolumeSpec {
 
   // volumeMode defines if a volume is intended to be used with a formatted filesystem
   // or to remain in raw block state. Value of Filesystem is implied when not included in spec.
-  // This is a beta feature.
   // +optional
   optional string volumeMode = 8;
 
@@ -3247,6 +3278,15 @@ message PodSecurityContext {
   // sysctls (by the container runtime) might fail to launch.
   // +optional
   repeated Sysctl sysctls = 7;
+
+  // fsGroupChangePolicy defines behavior of changing ownership and permission of the volume
+  // before being exposed inside Pod. This field will only apply to
+  // volume types which support fsGroup-based ownership (and permissions).
+  // It will have no effect on ephemeral volume types such as secret, configmaps,
+  // and emptydir.
+  // Valid values are "OnRootMismatch" and "Always". If not specified, defaults to "Always".
+  // +optional
+  optional string fsGroupChangePolicy = 9;
 }
 
 // Describes the class of pods that should avoid this node.
@@ -3497,8 +3537,7 @@ message PodSpec {
 
   // TopologySpreadConstraints describes how a group of pods ought to spread across topology
   // domains. Scheduler will schedule pods in a way which abides by the constraints.
-  // This field is alpha-level and is only honored by clusters that enables the EvenPodsSpread
-  // feature.
+  // This field is only honored by clusters that enable the EvenPodsSpread feature.
   // All topologySpreadConstraints are ANDed.
   // +optional
   // +patchMergeKey=topologyKey
@@ -4256,6 +4295,14 @@ message Secret {
   // +optional
   optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
 
+  // Immutable, if set to true, ensures that data stored in the Secret cannot
+  // be updated (only object metadata can be modified).
+  // If not set to true, the field can be modified at any time.
+  // Defaulted to nil.
+  // This is an alpha field enabled by ImmutableEphemeralVolumes feature gate.
+  // +optional
+  optional bool immutable = 5;
+
   // Data contains the secret data. Each key must consist of alphanumeric
   // characters, '-', '_' or '.'. The serialized form of the secret data is a
   // base64 encoded string, representing the arbitrary (possibly non-string)
@@ -4581,6 +4628,16 @@ message ServicePort {
   // +optional
   optional string protocol = 2;
 
+  // The application protocol for this port.
+  // This field follows standard Kubernetes label syntax.
+  // Un-prefixed names are reserved for IANA standard service names (as per
+  // RFC-6335 and http://www.iana.org/assignments/service-names).
+  // Non-standard protocols should use prefixed names such as
+  // mycompany.com/my-custom-protocol.
+  // Field can be enabled with ServiceAppProtocol feature gate.
+  // +optional
+  optional string appProtocol = 6;
+
   // The port that will be exposed by this service.
   optional int32 port = 3;
 
@@ -4864,7 +4921,7 @@ message Taint {
   // Required. The taint key to be applied to a node.
   optional string key = 1;
 
-  // Required. The taint value corresponding to the taint key.
+  // The taint value corresponding to the taint key.
   // +optional
   optional string value = 2;
 
@@ -5256,14 +5313,12 @@ message WeightedPodAffinityTerm {
 // WindowsSecurityContextOptions contain Windows-specific options and credentials.
 message WindowsSecurityContextOptions {
   // GMSACredentialSpecName is the name of the GMSA credential spec to use.
-  // This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.
   // +optional
   optional string gmsaCredentialSpecName = 1;
 
   // GMSACredentialSpec is where the GMSA admission webhook
   // (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
   // GMSA credential spec named by the GMSACredentialSpecName field.
-  // This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.
   // +optional
   optional string gmsaCredentialSpec = 2;
 
@@ -5271,7 +5326,6 @@ message WindowsSecurityContextOptions {
   // Defaults to the user specified in image metadata if unspecified.
   // May also be set in PodSecurityContext. If set in both SecurityContext and
   // PodSecurityContext, the value specified in SecurityContext takes precedence.
-  // This field is beta-level and may be disabled with the WindowsRunAsUserName feature flag.
   // +optional
   optional string runAsUserName = 3;
 }
diff --git a/vendor/k8s.io/api/core/v1/resource.go b/vendor/k8s.io/api/core/v1/resource.go
index bb804125..5bc9cd5b 100644
--- a/vendor/k8s.io/api/core/v1/resource.go
+++ b/vendor/k8s.io/api/core/v1/resource.go
@@ -41,6 +41,14 @@ func (self *ResourceList) Memory() *resource.Quantity {
 	return &resource.Quantity{Format: resource.BinarySI}
 }
 
+// Returns the Storage limit if specified.
+func (self *ResourceList) Storage() *resource.Quantity {
+	if val, ok := (*self)[ResourceStorage]; ok {
+		return &val
+	}
+	return &resource.Quantity{Format: resource.BinarySI}
+}
+
 func (self *ResourceList) Pods() *resource.Quantity {
 	if val, ok := (*self)[ResourcePods]; ok {
 		return &val
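Not part of the patch: a minimal sketch of how calling code might use the new Storage() accessor added above. Like the existing Cpu() and Memory() helpers it returns an empty BinarySI quantity when the resource is absent, so callers need no nil check; the variable names below are hypothetical.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	limits := v1.ResourceList{
		v1.ResourceStorage: resource.MustParse("10Gi"),
	}
	// Storage() returns a copy of the stored quantity.
	fmt.Println(limits.Storage().String()) // 10Gi

	// For an empty list the accessor still returns a usable zero quantity.
	empty := v1.ResourceList{}
	fmt.Println(empty.Storage().IsZero()) // true
}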
diff --git a/vendor/k8s.io/api/core/v1/types.go b/vendor/k8s.io/api/core/v1/types.go
index 47a40271..b61a86ab 100644
--- a/vendor/k8s.io/api/core/v1/types.go
+++ b/vendor/k8s.io/api/core/v1/types.go
@@ -331,7 +331,6 @@ type PersistentVolumeSpec struct {
 	MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,7,opt,name=mountOptions"`
 	// volumeMode defines if a volume is intended to be used with a formatted filesystem
 	// or to remain in raw block state. Value of Filesystem is implied when not included in spec.
-	// This is a beta feature.
 	// +optional
 	VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,8,opt,name=volumeMode,casttype=PersistentVolumeMode"`
 	// NodeAffinity defines constraints that limit what nodes this volume can be accessed from.
@@ -460,14 +459,17 @@ type PersistentVolumeClaimSpec struct {
 	StorageClassName *string `json:"storageClassName,omitempty" protobuf:"bytes,5,opt,name=storageClassName"`
 	// volumeMode defines what type of volume is required by the claim.
 	// Value of Filesystem is implied when not included in claim spec.
-	// This is a beta feature.
 	// +optional
 	VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,6,opt,name=volumeMode,casttype=PersistentVolumeMode"`
-	// This field requires the VolumeSnapshotDataSource alpha feature gate to be
-	// enabled and currently VolumeSnapshot is the only supported data source.
-	// If the provisioner can support VolumeSnapshot data source, it will create
-	// a new volume and data will be restored to the volume at the same time.
-	// If the provisioner does not support VolumeSnapshot data source, volume will
+	// This field can be used to specify either:
+	// * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot - Beta)
+	// * An existing PVC (PersistentVolumeClaim)
+	// * An existing custom resource/object that implements data population (Alpha)
+	// In order to use VolumeSnapshot object types, the appropriate feature gate
+	// must be enabled (VolumeSnapshotDataSource or AnyVolumeDataSource)
+	// If the provisioner or an external controller can support the specified data source,
+	// it will create a new volume based on the contents of the specified data source.
+	// If the specified data source is not supported, the volume will
 	// not be created and the failure will be reported as an event.
 	// In the future, we plan to support more data source types and the behavior
 	// of the provisioner may change.
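Not part of the patch: a minimal sketch of a claim restored from a snapshot via the dataSource field documented above, assuming the VolumeSnapshotDataSource feature gate is enabled; the object names are hypothetical.

package main

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	snapshotGroup := "snapshot.storage.k8s.io"
	mode := v1.PersistentVolumeFilesystem
	pvc := v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "restored-claim"},
		Spec: v1.PersistentVolumeClaimSpec{
			AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
			VolumeMode:  &mode,
			// The provisioner populates the new volume from this snapshot;
			// if it cannot, the failure is reported as an event on the claim.
			DataSource: &v1.TypedLocalObjectReference{
				APIGroup: &snapshotGroup,
				Kind:     "VolumeSnapshot",
				Name:     "my-snapshot",
			},
		},
	}
	_ = pvc
}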
@@ -887,9 +889,10 @@ type FlockerVolumeSource struct {
 type StorageMedium string
 
 const (
-	StorageMediumDefault   StorageMedium = ""          // use whatever the default is for the node, assume anything we don't explicitly handle is this
-	StorageMediumMemory    StorageMedium = "Memory"    // use memory (e.g. tmpfs on linux)
-	StorageMediumHugePages StorageMedium = "HugePages" // use hugepages
+	StorageMediumDefault         StorageMedium = ""           // use whatever the default is for the node, assume anything we don't explicitly handle is this
+	StorageMediumMemory          StorageMedium = "Memory"     // use memory (e.g. tmpfs on linux)
+	StorageMediumHugePages       StorageMedium = "HugePages"  // use hugepages
+	StorageMediumHugePagesPrefix StorageMedium = "HugePages-" // prefix for full medium notation HugePages-<size>
 )
 
 // Protocol defines network protocols supported for things like container ports.
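Not part of the patch: a sketch of how the new StorageMediumHugePagesPrefix constant might be used to pull the page size out of a sized medium such as "HugePages-2Mi"; hugePageSizeFromMedium is a hypothetical helper, not an API in this package.

package main

import (
	"fmt"
	"strings"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// hugePageSizeFromMedium extracts the page size from a medium of the form
// "HugePages-<size>"; it reports false for any other medium.
func hugePageSizeFromMedium(m v1.StorageMedium) (resource.Quantity, bool) {
	prefix := string(v1.StorageMediumHugePagesPrefix)
	s := string(m)
	if !strings.HasPrefix(s, prefix) {
		return resource.Quantity{}, false
	}
	q, err := resource.ParseQuantity(strings.TrimPrefix(s, prefix))
	if err != nil {
		return resource.Quantity{}, false
	}
	return q, true
}

func main() {
	if size, ok := hugePageSizeFromMedium(v1.StorageMedium("HugePages-2Mi")); ok {
		fmt.Println(size.String()) // 2Mi
	}
}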
@@ -2180,7 +2183,6 @@ type Container struct {
 	// +patchStrategy=merge
 	VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"`
 	// volumeDevices is the list of block devices to be used by the container.
-	// This is a beta feature.
 	// +patchMergeKey=devicePath
 	// +patchStrategy=merge
 	// +optional
@@ -2203,7 +2205,7 @@ type Container struct {
 	// This can be used to provide different probe parameters at the beginning of a Pod's lifecycle,
 	// when it might take a long time to load data or warm a cache, than during steady-state operation.
 	// This cannot be updated.
-	// This is an alpha feature enabled by the StartupProbe feature flag.
+	// This is a beta feature enabled by the StartupProbe feature flag.
 	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
 	// +optional
 	StartupProbe *Probe `json:"startupProbe,omitempty" protobuf:"bytes,22,opt,name=startupProbe"`
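Not part of the patch: a minimal sketch of a container using the startupProbe described above; the image and endpoint are hypothetical, and the cluster must have the StartupProbe feature gate enabled.

package main

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	container := v1.Container{
		Name:  "app",
		Image: "example.com/app:latest",
		// The startup probe runs first; liveness and readiness probes are
		// held off until it succeeds, which protects slow-starting containers
		// from being killed by an impatient liveness probe.
		StartupProbe: &v1.Probe{
			Handler: v1.Handler{
				HTTPGet: &v1.HTTPGetAction{Path: "/healthz", Port: intstr.FromInt(8080)},
			},
			FailureThreshold: 30,
			PeriodSeconds:    10,
		},
	}
	_ = container
}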
@@ -2750,7 +2752,7 @@ type PreferredSchedulingTerm struct {
 type Taint struct {
 	// Required. The taint key to be applied to a node.
 	Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
-	// Required. The taint value corresponding to the taint key.
+	// The taint value corresponding to the taint key.
 	// +optional
 	Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
 	// Required. The effect of the taint on pods
@@ -3038,8 +3040,7 @@ type PodSpec struct {
 	Overhead ResourceList `json:"overhead,omitempty" protobuf:"bytes,32,opt,name=overhead"`
 	// TopologySpreadConstraints describes how a group of pods ought to spread across topology
 	// domains. Scheduler will schedule pods in a way which abides by the constraints.
-	// This field is alpha-level and is only honored by clusters that enables the EvenPodsSpread
-	// feature.
+	// This field is only honored by clusters that enable the EvenPodsSpread feature.
 	// All topologySpreadConstraints are ANDed.
 	// +optional
 	// +patchMergeKey=topologyKey
@@ -3125,6 +3126,22 @@ type HostAlias struct {
 	Hostnames []string `json:"hostnames,omitempty" protobuf:"bytes,2,rep,name=hostnames"`
 }
 
+// PodFSGroupChangePolicy holds policies that will be used for applying fsGroup to a volume
+// when volume is mounted.
+type PodFSGroupChangePolicy string
+
+const (
+	// FSGroupChangeOnRootMismatch indicates that volume's ownership and permissions will be changed
+	// only when permission and ownership of root directory does not match with expected
+	// permissions on the volume. This can help shorten the time it takes to change
+	// ownership and permissions of a volume.
+	FSGroupChangeOnRootMismatch PodFSGroupChangePolicy = "OnRootMismatch"
+	// FSGroupChangeAlways indicates that volume's ownership and permissions
+	// should always be changed whenever volume is mounted inside a Pod. This is the default
+	// behavior.
+	FSGroupChangeAlways PodFSGroupChangePolicy = "Always"
+)
+
 // PodSecurityContext holds pod-level security attributes and common container settings.
 // Some fields are also present in container.securityContext.  Field values of
 // container.securityContext take precedence over field values of PodSecurityContext.
@@ -3183,6 +3200,14 @@ type PodSecurityContext struct {
 	// sysctls (by the container runtime) might fail to launch.
 	// +optional
 	Sysctls []Sysctl `json:"sysctls,omitempty" protobuf:"bytes,7,rep,name=sysctls"`
+	// fsGroupChangePolicy defines behavior of changing ownership and permission of the volume
+	// before being exposed inside Pod. This field will only apply to
+	// volume types which support fsGroup based ownership(and permissions).
+	// It will have no effect on ephemeral volume types such as: secret, configmaps
+	// and emptydir.
+	// Valid values are "OnRootMismatch" and "Always". If not specified defaults to "Always".
+	// +optional
+	FSGroupChangePolicy *PodFSGroupChangePolicy `json:"fsGroupChangePolicy,omitempty" protobuf:"bytes,9,opt,name=fsGroupChangePolicy"`
 }
 
 // PodQOSClass defines the supported qos classes of Pods.
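Not part of the patch: a minimal sketch of a pod-level security context opting into the new OnRootMismatch policy; it assumes the corresponding feature gate is enabled on the cluster.

package main

import v1 "k8s.io/api/core/v1"

func main() {
	policy := v1.FSGroupChangeOnRootMismatch
	fsGroup := int64(2000)
	sc := v1.PodSecurityContext{
		FSGroup: &fsGroup,
		// Skip the recursive ownership/permission change when the volume's
		// root already matches the expected fsGroup, which can shorten pod
		// startup noticeably on volumes with many files.
		FSGroupChangePolicy: &policy,
	}
	_ = sc
}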
@@ -3298,7 +3323,6 @@ type EphemeralContainerCommon struct {
 	// +patchStrategy=merge
 	VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"`
 	// volumeDevices is the list of block devices to be used by the container.
-	// This is a beta feature.
 	// +patchMergeKey=devicePath
 	// +patchStrategy=merge
 	// +optional
@@ -3990,6 +4014,16 @@ type ServicePort struct {
 	// +optional
 	Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"`
 
+	// The application protocol for this port.
+	// This field follows standard Kubernetes label syntax.
+	// Un-prefixed names are reserved for IANA standard service names (as per
+	// RFC-6335 and http://www.iana.org/assignments/service-names).
+	// Non-standard protocols should use prefixed names such as
+	// mycompany.com/my-custom-protocol.
+	// Field can be enabled with ServiceAppProtocol feature gate.
+	// +optional
+	AppProtocol *string `json:"appProtocol,omitempty" protobuf:"bytes,6,opt,name=appProtocol"`
+
 	// The port that will be exposed by this service.
 	Port int32 `json:"port" protobuf:"varint,3,opt,name=port"`
 
@@ -4061,6 +4095,7 @@ type ServiceList struct {
 }
 
 // +genclient
+// +genclient:method=CreateToken,verb=create,subresource=token,input=k8s.io/api/authentication/v1.TokenRequest,result=k8s.io/api/authentication/v1.TokenRequest
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 
 // ServiceAccount binds together:
@@ -4204,6 +4239,16 @@ type EndpointPort struct {
 	// Default is TCP.
 	// +optional
 	Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,3,opt,name=protocol,casttype=Protocol"`
+
+	// The application protocol for this port.
+	// This field follows standard Kubernetes label syntax.
+	// Un-prefixed names are reserved for IANA standard service names (as per
+	// RFC-6335 and http://www.iana.org/assignments/service-names).
+	// Non-standard protocols should use prefixed names such as
+	// mycompany.com/my-custom-protocol.
+	// Field can be enabled with ServiceAppProtocol feature gate.
+	// +optional
+	AppProtocol *string `json:"appProtocol,omitempty" protobuf:"bytes,4,opt,name=appProtocol"`
 }
 
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
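Not part of the patch: a minimal sketch of populating the new appProtocol field on a ServicePort (EndpointPort gains the same field); the protocol name is a hypothetical prefixed, non-IANA value, and the ServiceAppProtocol feature gate must be enabled.

package main

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	appProtocol := "mycompany.com/my-custom-protocol"
	port := v1.ServicePort{
		Name:       "web",
		Protocol:   v1.ProtocolTCP,
		Port:       443,
		TargetPort: intstr.FromInt(8443),
		// A layer-7 protocol hint for consumers such as ingress controllers
		// and service meshes; it does not change kube-proxy behavior.
		AppProtocol: &appProtocol,
	}
	_ = port
}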
@@ -4981,6 +5026,20 @@ type ServiceProxyOptions struct {
 }
 
 // ObjectReference contains enough information to let you inspect or modify the referred object.
+// ---
+// New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs.
+//  1. Ignored fields.  It includes many fields which are not generally honored.  For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage.
+//  2. Invalid usage help.  It is impossible to add specific help for individual usage.  In most embedded usages, there are particular
+//     restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted".
+//     Those cannot be well described when embedded.
+//  3. Inconsistent validation.  Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen.
+//  4. The fields are both imprecise and overly precise.  Kind is not a precise mapping to a URL. This can produce ambiguity
+//     during interpretation and require a REST mapping.  In most cases, the dependency is on the group,resource tuple
+//     and the version of the actual struct is irrelevant.
+//  5. We cannot easily change it.  Because this type is embedded in many locations, updates to this type
+//     will affect numerous schemas.  Don't make new APIs embed an underspecified API type they do not control.
+// Instead of using this type, create a locally provided and used type that is well-focused on your reference.
+// For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 .
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 type ObjectReference struct {
 	// Kind of the referent.
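Not part of the patch: the comment above recommends narrowly scoped, locally defined reference types instead of embedding ObjectReference. A hypothetical example of what such a type might look like for an API that only needs to point at a ConfigMap key:

package example

// ConfigMapKeyRef names only the fields this particular API actually honors,
// so its validation and documentation can be precise (unlike ObjectReference).
type ConfigMapKeyRef struct {
	// Namespace of the referenced ConfigMap. Defaults to the referring
	// object's namespace when empty.
	Namespace string `json:"namespace,omitempty"`
	// Name of the referenced ConfigMap. Required.
	Name string `json:"name"`
	// Key within the ConfigMap's data. Required.
	Key string `json:"key"`
}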
@@ -5194,8 +5253,7 @@ const (
 // LimitRangeItem defines a min/max usage limit for any resource that matches on kind.
 type LimitRangeItem struct {
 	// Type of resource that this limit applies to.
-	// +optional
-	Type LimitType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=LimitType"`
+	Type LimitType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=LimitType"`
 	// Max usage constraints on this kind by resource name.
 	// +optional
 	Max ResourceList `json:"max,omitempty" protobuf:"bytes,2,rep,name=max,casttype=ResourceList,castkey=ResourceName"`
@@ -5424,6 +5482,14 @@ type Secret struct {
 	// +optional
 	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
 
+	// Immutable, if set to true, ensures that data stored in the Secret cannot
+	// be updated (only object metadata can be modified).
+	// If not set to true, the field can be modified at any time.
+	// Defaulted to nil.
+	// This is an alpha field enabled by ImmutableEphemeralVolumes feature gate.
+	// +optional
+	Immutable *bool `json:"immutable,omitempty" protobuf:"varint,5,opt,name=immutable"`
+
 	// Data contains the secret data. Each key must consist of alphanumeric
 	// characters, '-', '_' or '.'. The serialized form of the secret data is a
 	// base64 encoded string, representing the arbitrary (possibly non-string)
@@ -5557,6 +5623,14 @@ type ConfigMap struct {
 	// +optional
 	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
 
+	// Immutable, if set to true, ensures that data stored in the ConfigMap cannot
+	// be updated (only object metadata can be modified).
+	// If not set to true, the field can be modified at any time.
+	// Defaulted to nil.
+	// This is an alpha field enabled by ImmutableEphemeralVolumes feature gate.
+	// +optional
+	Immutable *bool `json:"immutable,omitempty" protobuf:"varint,4,opt,name=immutable"`
+
 	// Data contains the configuration data.
 	// Each key must consist of alphanumeric characters, '-', '_' or '.'.
 	// Values with non-UTF-8 byte sequences must use the BinaryData field.
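Not part of the patch: a minimal sketch of creating an immutable ConfigMap and Secret with the new field; names and contents are hypothetical, and the ImmutableEphemeralVolumes feature gate must be enabled.

package main

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	immutable := true
	cm := v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: "app-config"},
		Data:       map[string]string{"log-level": "info"},
		// Once Immutable is true, data and binaryData are frozen; only object
		// metadata can still be modified.
		Immutable: &immutable,
	}
	secret := v1.Secret{
		ObjectMeta: metav1.ObjectMeta{Name: "app-token"},
		StringData: map[string]string{"token": "changeme"},
		Immutable:  &immutable,
	}
	_, _ = cm, secret
}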
@@ -5793,14 +5867,12 @@ type SELinuxOptions struct {
 // WindowsSecurityContextOptions contain Windows-specific options and credentials.
 type WindowsSecurityContextOptions struct {
 	// GMSACredentialSpecName is the name of the GMSA credential spec to use.
-	// This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.
 	// +optional
 	GMSACredentialSpecName *string `json:"gmsaCredentialSpecName,omitempty" protobuf:"bytes,1,opt,name=gmsaCredentialSpecName"`
 
 	// GMSACredentialSpec is where the GMSA admission webhook
 	// (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
 	// GMSA credential spec named by the GMSACredentialSpecName field.
-	// This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.
 	// +optional
 	GMSACredentialSpec *string `json:"gmsaCredentialSpec,omitempty" protobuf:"bytes,2,opt,name=gmsaCredentialSpec"`
 
@@ -5808,7 +5880,6 @@ type WindowsSecurityContextOptions struct {
 	// Defaults to the user specified in image metadata if unspecified.
 	// May also be set in PodSecurityContext. If set in both SecurityContext and
 	// PodSecurityContext, the value specified in SecurityContext takes precedence.
-	// This field is beta-level and may be disabled with the WindowsRunAsUserName feature flag.
 	// +optional
 	RunAsUserName *string `json:"runAsUserName,omitempty" protobuf:"bytes,3,opt,name=runAsUserName"`
 }
diff --git a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
index 441d3e10..331451fe 100644
--- a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
@@ -252,6 +252,7 @@ func (ComponentStatusList) SwaggerDoc() map[string]string {
 var map_ConfigMap = map[string]string{
 	"":           "ConfigMap holds configuration data for pods to consume.",
 	"metadata":   "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"immutable":  "Immutable, if set to true, ensures that data stored in the ConfigMap cannot be updated (only object metadata can be modified). If not set to true, the field can be modified at any time. Defaulted to nil. This is an alpha field enabled by ImmutableEphemeralVolumes feature gate.",
 	"data":       "Data contains the configuration data. Each key must consist of alphanumeric characters, '-', '_' or '.'. Values with non-UTF-8 byte sequences must use the BinaryData field. The keys stored in Data must not overlap with the keys in the BinaryData field, this is enforced during validation process.",
 	"binaryData": "BinaryData contains the binary data. Each key must consist of alphanumeric characters, '-', '_' or '.'. BinaryData can contain byte sequences that are not in the UTF-8 range. The keys stored in BinaryData must not overlap with the ones in the Data field, this is enforced during validation process. Using this field will require 1.10+ apiserver and kubelet.",
 }
@@ -335,10 +336,10 @@ var map_Container = map[string]string{
 	"env":                      "List of environment variables to set in the container. Cannot be updated.",
 	"resources":                "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/",
 	"volumeMounts":             "Pod volumes to mount into the container's filesystem. Cannot be updated.",
-	"volumeDevices":            "volumeDevices is the list of block devices to be used by the container. This is a beta feature.",
+	"volumeDevices":            "volumeDevices is the list of block devices to be used by the container.",
 	"livenessProbe":            "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
 	"readinessProbe":           "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
-	"startupProbe":             "StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. This is an alpha feature enabled by the StartupProbe feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
+	"startupProbe":             "StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. This is a beta feature enabled by the StartupProbe feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
 	"lifecycle":                "Actions that the management system should take in response to container lifecycle events. Cannot be updated.",
 	"terminationMessagePath":   "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.",
 	"terminationMessagePolicy": "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.",
@@ -501,10 +502,11 @@ func (EndpointAddress) SwaggerDoc() map[string]string {
 }
 
 var map_EndpointPort = map[string]string{
-	"":         "EndpointPort is a tuple that describes a single port.",
-	"name":     "The name of this port.  This must match the 'name' field in the corresponding ServicePort. Must be a DNS_LABEL. Optional only if one port is defined.",
-	"port":     "The port number of the endpoint.",
-	"protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.",
+	"":            "EndpointPort is a tuple that describes a single port.",
+	"name":        "The name of this port.  This must match the 'name' field in the corresponding ServicePort. Must be a DNS_LABEL. Optional only if one port is defined.",
+	"port":        "The port number of the endpoint.",
+	"protocol":    "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.",
+	"appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol. Field can be enabled with ServiceAppProtocol feature gate.",
 }
 
 func (EndpointPort) SwaggerDoc() map[string]string {
@@ -597,7 +599,7 @@ var map_EphemeralContainerCommon = map[string]string{
 	"env":                      "List of environment variables to set in the container. Cannot be updated.",
 	"resources":                "Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod.",
 	"volumeMounts":             "Pod volumes to mount into the container's filesystem. Cannot be updated.",
-	"volumeDevices":            "volumeDevices is the list of block devices to be used by the container. This is a beta feature.",
+	"volumeDevices":            "volumeDevices is the list of block devices to be used by the container.",
 	"livenessProbe":            "Probes are not allowed for ephemeral containers.",
 	"readinessProbe":           "Probes are not allowed for ephemeral containers.",
 	"startupProbe":             "Probes are not allowed for ephemeral containers.",
@@ -1298,8 +1300,8 @@ var map_PersistentVolumeClaimSpec = map[string]string{
 	"resources":        "Resources represents the minimum resources the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources",
 	"volumeName":       "VolumeName is the binding reference to the PersistentVolume backing this claim.",
 	"storageClassName": "Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1",
-	"volumeMode":       "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. This is a beta feature.",
-	"dataSource":       "This field requires the VolumeSnapshotDataSource alpha feature gate to be enabled and currently VolumeSnapshot is the only supported data source. If the provisioner can support VolumeSnapshot data source, it will create a new volume and data will be restored to the volume at the same time. If the provisioner does not support VolumeSnapshot data source, volume will not be created and the failure will be reported as an event. In the future, we plan to support more data source types and the behavior of the provisioner may change.",
+	"volumeMode":       "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.",
+	"dataSource":       "This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot - Beta) * An existing PVC (PersistentVolumeClaim) * An existing custom resource/object that implements data population (Alpha) In order to use VolumeSnapshot object types, the appropriate feature gate must be enabled (VolumeSnapshotDataSource or AnyVolumeDataSource) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the specified data source is not supported, the volume will not be created and the failure will be reported as an event. In the future, we plan to support more data source types and the behavior of the provisioner may change.",
 }
 
 func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string {
@@ -1376,7 +1378,7 @@ var map_PersistentVolumeSpec = map[string]string{
 	"persistentVolumeReclaimPolicy": "What happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming",
 	"storageClassName":              "Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass.",
 	"mountOptions":                  "A list of mount options, e.g. [\"ro\", \"soft\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options",
-	"volumeMode":                    "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec. This is a beta feature.",
+	"volumeMode":                    "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec.",
 	"nodeAffinity":                  "NodeAffinity defines constraints that limit what nodes this volume can be accessed from. This field influences the scheduling of pods that use this volume.",
 }
 
@@ -1572,15 +1574,16 @@ func (PodReadinessGate) SwaggerDoc() map[string]string {
 }
 
 var map_PodSecurityContext = map[string]string{
-	"":                   "PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext.  Field values of container.securityContext take precedence over field values of PodSecurityContext.",
-	"seLinuxOptions":     "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container.  May also be set in SecurityContext.  If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.",
-	"windowsOptions":     "The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
-	"runAsUser":          "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext.  If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.",
-	"runAsGroup":         "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext.  If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.",
-	"runAsNonRoot":       "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext.  If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
-	"supplementalGroups": "A list of groups applied to the first process run in each container, in addition to the container's primary GID.  If unspecified, no groups will be added to any container.",
-	"fsGroup":            "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw ",
-	"sysctls":            "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch.",
+	"":                    "PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext.  Field values of container.securityContext take precedence over field values of PodSecurityContext.",
+	"seLinuxOptions":      "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container.  May also be set in SecurityContext.  If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.",
+	"windowsOptions":      "The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
+	"runAsUser":           "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext.  If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.",
+	"runAsGroup":          "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext.  If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.",
+	"runAsNonRoot":        "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext.  If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
+	"supplementalGroups":  "A list of groups applied to the first process run in each container, in addition to the container's primary GID.  If unspecified, no groups will be added to any container.",
+	"fsGroup":             "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw ",
+	"sysctls":             "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch.",
+	"fsGroupChangePolicy": "fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". If not specified defaults to \"Always\".",
 }
 
 func (PodSecurityContext) SwaggerDoc() map[string]string {
@@ -1631,7 +1634,7 @@ var map_PodSpec = map[string]string{
 	"enableServiceLinks":            "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.",
 	"preemptionPolicy":              "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature.",
 	"overhead":                      "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature.",
-	"topologySpreadConstraints":     "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. This field is alpha-level and is only honored by clusters that enables the EvenPodsSpread feature. All topologySpreadConstraints are ANDed.",
+	"topologySpreadConstraints":     "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. This field is only honored by clusters that enable the EvenPodsSpread feature. All topologySpreadConstraints are ANDed.",
 }
 
 func (PodSpec) SwaggerDoc() map[string]string {
@@ -2015,6 +2018,7 @@ func (ScopedResourceSelectorRequirement) SwaggerDoc() map[string]string {
 var map_Secret = map[string]string{
 	"":           "Secret holds secret data of a certain type. The total bytes of the values in the Data field must be less than MaxSecretSize bytes.",
 	"metadata":   "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"immutable":  "Immutable, if set to true, ensures that data stored in the Secret cannot be updated (only object metadata can be modified). If not set to true, the field can be modified at any time. Defaulted to nil. This is an alpha field enabled by ImmutableEphemeralVolumes feature gate.",
 	"data":       "Data contains the secret data. Each key must consist of alphanumeric characters, '-', '_' or '.'. The serialized form of the secret data is a base64 encoded string, representing the arbitrary (possibly non-string) data value here. Described in https://tools.ietf.org/html/rfc4648#section-4",
 	"stringData": "stringData allows specifying non-binary secret data in string form. It is provided as a write-only convenience method. All keys and values are merged into the data field on write, overwriting any existing values. It is never output when reading from the API.",
 	"type":       "Used to facilitate programmatic handling of secret data.",
@@ -2167,12 +2171,13 @@ func (ServiceList) SwaggerDoc() map[string]string {
 }
 
 var map_ServicePort = map[string]string{
-	"":           "ServicePort contains information on service's port.",
-	"name":       "The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. When considering the endpoints for a Service, this must match the 'name' field in the EndpointPort. Optional if only one ServicePort is defined on this service.",
-	"protocol":   "The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". Default is TCP.",
-	"port":       "The port that will be exposed by this service.",
-	"targetPort": "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service",
-	"nodePort":   "The port on each node on which this service is exposed when type=NodePort or LoadBalancer. Usually assigned by the system. If specified, it will be allocated to the service if unused or else creation of the service will fail. Default is to auto-allocate a port if the ServiceType of this Service requires one. More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport",
+	"":            "ServicePort contains information on service's port.",
+	"name":        "The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. When considering the endpoints for a Service, this must match the 'name' field in the EndpointPort. Optional if only one ServicePort is defined on this service.",
+	"protocol":    "The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". Default is TCP.",
+	"appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol. Field can be enabled with ServiceAppProtocol feature gate.",
+	"port":        "The port that will be exposed by this service.",
+	"targetPort":  "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service",
+	"nodePort":    "The port on each node on which this service is exposed when type=NodePort or LoadBalancer. Usually assigned by the system. If specified, it will be allocated to the service if unused or else creation of the service will fail. Default is to auto-allocate a port if the ServiceType of this Service requires one. More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport",
 }
 
 func (ServicePort) SwaggerDoc() map[string]string {
@@ -2278,7 +2283,7 @@ func (TCPSocketAction) SwaggerDoc() map[string]string {
 var map_Taint = map[string]string{
 	"":          "The node this Taint is attached to has the \"effect\" on any pod that does not tolerate the Taint.",
 	"key":       "Required. The taint key to be applied to a node.",
-	"value":     "Required. The taint value corresponding to the taint key.",
+	"value":     "The taint value corresponding to the taint key.",
 	"effect":    "Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute.",
 	"timeAdded": "TimeAdded represents the time at which the taint was added. It is only written for NoExecute taints.",
 }
@@ -2456,9 +2461,9 @@ func (WeightedPodAffinityTerm) SwaggerDoc() map[string]string {
 
 var map_WindowsSecurityContextOptions = map[string]string{
 	"":                       "WindowsSecurityContextOptions contain Windows-specific options and credentials.",
-	"gmsaCredentialSpecName": "GMSACredentialSpecName is the name of the GMSA credential spec to use. This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.",
-	"gmsaCredentialSpec":     "GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. This field is alpha-level and is only honored by servers that enable the WindowsGMSA feature flag.",
-	"runAsUserName":          "The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. This field is beta-level and may be disabled with the WindowsRunAsUserName feature flag.",
+	"gmsaCredentialSpecName": "GMSACredentialSpecName is the name of the GMSA credential spec to use.",
+	"gmsaCredentialSpec":     "GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field.",
+	"runAsUserName":          "The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
 }
 
 func (WindowsSecurityContextOptions) SwaggerDoc() map[string]string {
diff --git a/vendor/k8s.io/api/core/v1/well_known_taints.go b/vendor/k8s.io/api/core/v1/well_known_taints.go
index e3905192..e1a8f629 100644
--- a/vendor/k8s.io/api/core/v1/well_known_taints.go
+++ b/vendor/k8s.io/api/core/v1/well_known_taints.go
@@ -18,38 +18,31 @@ package v1
 
 const (
 	// TaintNodeNotReady will be added when node is not ready
-	// and feature-gate for TaintBasedEvictions flag is enabled,
 	// and removed when node becomes ready.
 	TaintNodeNotReady = "node.kubernetes.io/not-ready"
 
 	// TaintNodeUnreachable will be added when node becomes unreachable
 	// (corresponding to NodeReady status ConditionUnknown)
-	// and feature-gate for TaintBasedEvictions flag is enabled,
 	// and removed when node becomes reachable (NodeReady status ConditionTrue).
 	TaintNodeUnreachable = "node.kubernetes.io/unreachable"
 
 	// TaintNodeUnschedulable will be added when node becomes unschedulable
-	// and feature-gate for TaintNodesByCondition flag is enabled,
 	// and removed when node becomes schedulable.
 	TaintNodeUnschedulable = "node.kubernetes.io/unschedulable"
 
 	// TaintNodeMemoryPressure will be added when node has memory pressure
-	// and feature-gate for TaintNodesByCondition flag is enabled,
 	// and removed when node has enough memory.
 	TaintNodeMemoryPressure = "node.kubernetes.io/memory-pressure"
 
 	// TaintNodeDiskPressure will be added when node has disk pressure
-	// and feature-gate for TaintNodesByCondition flag is enabled,
 	// and removed when node has enough disk.
 	TaintNodeDiskPressure = "node.kubernetes.io/disk-pressure"
 
 	// TaintNodeNetworkUnavailable will be added when node's network is unavailable
-	// and feature-gate for TaintNodesByCondition flag is enabled,
 	// and removed when network becomes ready.
 	TaintNodeNetworkUnavailable = "node.kubernetes.io/network-unavailable"
 
 	// TaintNodePIDPressure will be added when node has pid pressure
-	// and feature-gate for TaintNodesByCondition flag is enabled,
 	// and removed when node has enough available pids.
 	TaintNodePIDPressure = "node.kubernetes.io/pid-pressure"
 )
diff --git a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go
index ac4855ab..23d96444 100644
--- a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go
@@ -519,6 +519,11 @@ func (in *ConfigMap) DeepCopyInto(out *ConfigMap) {
 	*out = *in
 	out.TypeMeta = in.TypeMeta
 	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	if in.Immutable != nil {
+		in, out := &in.Immutable, &out.Immutable
+		*out = new(bool)
+		**out = **in
+	}
 	if in.Data != nil {
 		in, out := &in.Data, &out.Data
 		*out = make(map[string]string, len(*in))
@@ -1091,6 +1096,11 @@ func (in *EndpointAddress) DeepCopy() *EndpointAddress {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *EndpointPort) DeepCopyInto(out *EndpointPort) {
 	*out = *in
+	if in.AppProtocol != nil {
+		in, out := &in.AppProtocol, &out.AppProtocol
+		*out = new(string)
+		**out = **in
+	}
 	return
 }
 
@@ -1124,7 +1134,9 @@ func (in *EndpointSubset) DeepCopyInto(out *EndpointSubset) {
 	if in.Ports != nil {
 		in, out := &in.Ports, &out.Ports
 		*out = make([]EndpointPort, len(*in))
-		copy(*out, *in)
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
 	}
 	return
 }
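Not part of the patch: a small illustration of why the generated code above switches from copy() to per-element DeepCopyInto. Once EndpointPort carries a pointer field, a shallow slice copy would alias AppProtocol between the original and the copy.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	proto := "http"
	in := []v1.EndpointPort{{Name: "web", Port: 80, AppProtocol: &proto}}

	shallow := make([]v1.EndpointPort, len(in))
	copy(shallow, in) // both slices now share the same *string

	deep := make([]v1.EndpointPort, len(in))
	for i := range in {
		in[i].DeepCopyInto(&deep[i]) // AppProtocol gets its own allocation
	}

	*shallow[0].AppProtocol = "grpc"
	fmt.Println(*in[0].AppProtocol, *deep[0].AppProtocol) // grpc http
}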
@@ -3677,6 +3689,11 @@ func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) {
 		*out = make([]Sysctl, len(*in))
 		copy(*out, *in)
 	}
+	if in.FSGroupChangePolicy != nil {
+		in, out := &in.FSGroupChangePolicy, &out.FSGroupChangePolicy
+		*out = new(PodFSGroupChangePolicy)
+		**out = **in
+	}
 	return
 }
 
@@ -4663,6 +4680,11 @@ func (in *Secret) DeepCopyInto(out *Secret) {
 	*out = *in
 	out.TypeMeta = in.TypeMeta
 	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	if in.Immutable != nil {
+		in, out := &in.Immutable, &out.Immutable
+		*out = new(bool)
+		**out = **in
+	}
 	if in.Data != nil {
 		in, out := &in.Data, &out.Data
 		*out = make(map[string][]byte, len(*in))
@@ -5112,6 +5134,11 @@ func (in *ServiceList) DeepCopyObject() runtime.Object {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *ServicePort) DeepCopyInto(out *ServicePort) {
 	*out = *in
+	if in.AppProtocol != nil {
+		in, out := &in.AppProtocol, &out.AppProtocol
+		*out = new(string)
+		**out = **in
+	}
 	out.TargetPort = in.TargetPort
 	return
 }
@@ -5157,7 +5184,9 @@ func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) {
 	if in.Ports != nil {
 		in, out := &in.Ports, &out.Ports
 		*out = make([]ServicePort, len(*in))
-		copy(*out, *in)
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
 	}
 	if in.Selector != nil {
 		in, out := &in.Selector, &out.Selector
diff --git a/vendor/k8s.io/api/discovery/v1alpha1/generated.pb.go b/vendor/k8s.io/api/discovery/v1alpha1/generated.pb.go
index fa4d3ac5..45c4382c 100644
--- a/vendor/k8s.io/api/discovery/v1alpha1/generated.pb.go
+++ b/vendor/k8s.io/api/discovery/v1alpha1/generated.pb.go
@@ -44,7 +44,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 func (m *Endpoint) Reset()      { *m = Endpoint{} }
 func (*Endpoint) ProtoMessage() {}
@@ -1621,6 +1621,7 @@ func (m *EndpointSliceList) Unmarshal(dAtA []byte) error {
 func skipGenerated(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
+	depth := 0
 	for iNdEx < l {
 		var wire uint64
 		for shift := uint(0); ; shift += 7 {
@@ -1652,10 +1653,8 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			return iNdEx, nil
 		case 1:
 			iNdEx += 8
-			return iNdEx, nil
 		case 2:
 			var length int
 			for shift := uint(0); ; shift += 7 {
@@ -1676,55 +1675,30 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 				return 0, ErrInvalidLengthGenerated
 			}
 			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthGenerated
-			}
-			return iNdEx, nil
 		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowGenerated
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipGenerated(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthGenerated
-				}
-			}
-			return iNdEx, nil
+			depth++
 		case 4:
-			return iNdEx, nil
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
 		case 5:
 			iNdEx += 4
-			return iNdEx, nil
 		default:
 			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
 		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
 	}
-	panic("unreachable")
+	return 0, io.ErrUnexpectedEOF
 }
 
 var (
-	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
 )
diff --git a/vendor/k8s.io/api/discovery/v1beta1/generated.pb.go b/vendor/k8s.io/api/discovery/v1beta1/generated.pb.go
index 2283d12d..ce0046c5 100644
--- a/vendor/k8s.io/api/discovery/v1beta1/generated.pb.go
+++ b/vendor/k8s.io/api/discovery/v1beta1/generated.pb.go
@@ -44,7 +44,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 func (m *Endpoint) Reset()      { *m = Endpoint{} }
 func (*Endpoint) ProtoMessage() {}
@@ -1621,6 +1621,7 @@ func (m *EndpointSliceList) Unmarshal(dAtA []byte) error {
 func skipGenerated(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
+	depth := 0
 	for iNdEx < l {
 		var wire uint64
 		for shift := uint(0); ; shift += 7 {
@@ -1652,10 +1653,8 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			return iNdEx, nil
 		case 1:
 			iNdEx += 8
-			return iNdEx, nil
 		case 2:
 			var length int
 			for shift := uint(0); ; shift += 7 {
@@ -1676,55 +1675,30 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 				return 0, ErrInvalidLengthGenerated
 			}
 			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthGenerated
-			}
-			return iNdEx, nil
 		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowGenerated
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipGenerated(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthGenerated
-				}
-			}
-			return iNdEx, nil
+			depth++
 		case 4:
-			return iNdEx, nil
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
 		case 5:
 			iNdEx += 4
-			return iNdEx, nil
 		default:
 			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
 		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
 	}
-	panic("unreachable")
+	return 0, io.ErrUnexpectedEOF
 }
 
 var (
-	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
 )
diff --git a/vendor/k8s.io/api/discovery/v1beta1/generated.proto b/vendor/k8s.io/api/discovery/v1beta1/generated.proto
index cce6f970..581ddf7b 100644
--- a/vendor/k8s.io/api/discovery/v1beta1/generated.proto
+++ b/vendor/k8s.io/api/discovery/v1beta1/generated.proto
@@ -107,8 +107,9 @@ message EndpointPort {
   // This field follows standard Kubernetes label syntax.
   // Un-prefixed names are reserved for IANA standard service names (as per
   // RFC-6335 and http://www.iana.org/assignments/service-names).
-  // Non-standard protocols should use prefixed names.
-  // Default is empty string.
+  // Non-standard protocols should use prefixed names such as
+  // mycompany.com/my-custom-protocol.
+  // +optional
   optional string appProtocol = 4;
 }
 
diff --git a/vendor/k8s.io/api/discovery/v1beta1/types.go b/vendor/k8s.io/api/discovery/v1beta1/types.go
index e3dc5653..20fcde94 100644
--- a/vendor/k8s.io/api/discovery/v1beta1/types.go
+++ b/vendor/k8s.io/api/discovery/v1beta1/types.go
@@ -143,8 +143,9 @@ type EndpointPort struct {
 	// This field follows standard Kubernetes label syntax.
 	// Un-prefixed names are reserved for IANA standard service names (as per
 	// RFC-6335 and http://www.iana.org/assignments/service-names).
-	// Non-standard protocols should use prefixed names.
-	// Default is empty string.
+	// Non-standard protocols should use prefixed names such as
+	// mycompany.com/my-custom-protocol.
+	// +optional
 	AppProtocol *string `json:"appProtocol,omitempty" protobuf:"bytes,4,name=appProtocol"`
 }
 
diff --git a/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go
index 9dd3a035..d67cc721 100644
--- a/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/discovery/v1beta1/types_swagger_doc_generated.go
@@ -54,7 +54,7 @@ var map_EndpointPort = map[string]string{
 	"name":        "The name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is dervied from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.",
 	"protocol":    "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.",
 	"port":        "The port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.",
-	"appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names. Default is empty string.",
+	"appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.",
 }
 
 func (EndpointPort) SwaggerDoc() map[string]string {
diff --git a/vendor/k8s.io/api/events/v1beta1/generated.pb.go b/vendor/k8s.io/api/events/v1beta1/generated.pb.go
index 0e9a8e78..923dee5e 100644
--- a/vendor/k8s.io/api/events/v1beta1/generated.pb.go
+++ b/vendor/k8s.io/api/events/v1beta1/generated.pb.go
@@ -42,7 +42,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 func (m *Event) Reset()      { *m = Event{} }
 func (*Event) ProtoMessage() {}
@@ -1365,6 +1365,7 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error {
 func skipGenerated(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
+	depth := 0
 	for iNdEx < l {
 		var wire uint64
 		for shift := uint(0); ; shift += 7 {
@@ -1396,10 +1397,8 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			return iNdEx, nil
 		case 1:
 			iNdEx += 8
-			return iNdEx, nil
 		case 2:
 			var length int
 			for shift := uint(0); ; shift += 7 {
@@ -1420,55 +1419,30 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 				return 0, ErrInvalidLengthGenerated
 			}
 			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthGenerated
-			}
-			return iNdEx, nil
 		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowGenerated
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipGenerated(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthGenerated
-				}
-			}
-			return iNdEx, nil
+			depth++
 		case 4:
-			return iNdEx, nil
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
 		case 5:
 			iNdEx += 4
-			return iNdEx, nil
 		default:
 			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
 		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
 	}
-	panic("unreachable")
+	return 0, io.ErrUnexpectedEOF
 }
 
 var (
-	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
 )
diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go
index 65b47eab..bd37f432 100644
--- a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go
+++ b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go
@@ -47,7 +47,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 func (m *AllowedCSIDriver) Reset()      { *m = AllowedCSIDriver{} }
 func (*AllowedCSIDriver) ProtoMessage() {}
@@ -1309,38 +1309,10 @@ func (m *ReplicaSetStatus) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_ReplicaSetStatus proto.InternalMessageInfo
 
-func (m *ReplicationControllerDummy) Reset()      { *m = ReplicationControllerDummy{} }
-func (*ReplicationControllerDummy) ProtoMessage() {}
-func (*ReplicationControllerDummy) Descriptor() ([]byte, []int) {
-	return fileDescriptor_cdc93917efc28165, []int{45}
-}
-func (m *ReplicationControllerDummy) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *ReplicationControllerDummy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	b = b[:cap(b)]
-	n, err := m.MarshalToSizedBuffer(b)
-	if err != nil {
-		return nil, err
-	}
-	return b[:n], nil
-}
-func (m *ReplicationControllerDummy) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ReplicationControllerDummy.Merge(m, src)
-}
-func (m *ReplicationControllerDummy) XXX_Size() int {
-	return m.Size()
-}
-func (m *ReplicationControllerDummy) XXX_DiscardUnknown() {
-	xxx_messageInfo_ReplicationControllerDummy.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ReplicationControllerDummy proto.InternalMessageInfo
-
 func (m *RollbackConfig) Reset()      { *m = RollbackConfig{} }
 func (*RollbackConfig) ProtoMessage() {}
 func (*RollbackConfig) Descriptor() ([]byte, []int) {
-	return fileDescriptor_cdc93917efc28165, []int{46}
+	return fileDescriptor_cdc93917efc28165, []int{45}
 }
 func (m *RollbackConfig) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1368,7 +1340,7 @@ var xxx_messageInfo_RollbackConfig proto.InternalMessageInfo
 func (m *RollingUpdateDaemonSet) Reset()      { *m = RollingUpdateDaemonSet{} }
 func (*RollingUpdateDaemonSet) ProtoMessage() {}
 func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) {
-	return fileDescriptor_cdc93917efc28165, []int{47}
+	return fileDescriptor_cdc93917efc28165, []int{46}
 }
 func (m *RollingUpdateDaemonSet) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1396,7 +1368,7 @@ var xxx_messageInfo_RollingUpdateDaemonSet proto.InternalMessageInfo
 func (m *RollingUpdateDeployment) Reset()      { *m = RollingUpdateDeployment{} }
 func (*RollingUpdateDeployment) ProtoMessage() {}
 func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) {
-	return fileDescriptor_cdc93917efc28165, []int{48}
+	return fileDescriptor_cdc93917efc28165, []int{47}
 }
 func (m *RollingUpdateDeployment) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1424,7 +1396,7 @@ var xxx_messageInfo_RollingUpdateDeployment proto.InternalMessageInfo
 func (m *RunAsGroupStrategyOptions) Reset()      { *m = RunAsGroupStrategyOptions{} }
 func (*RunAsGroupStrategyOptions) ProtoMessage() {}
 func (*RunAsGroupStrategyOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_cdc93917efc28165, []int{49}
+	return fileDescriptor_cdc93917efc28165, []int{48}
 }
 func (m *RunAsGroupStrategyOptions) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1452,7 +1424,7 @@ var xxx_messageInfo_RunAsGroupStrategyOptions proto.InternalMessageInfo
 func (m *RunAsUserStrategyOptions) Reset()      { *m = RunAsUserStrategyOptions{} }
 func (*RunAsUserStrategyOptions) ProtoMessage() {}
 func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_cdc93917efc28165, []int{50}
+	return fileDescriptor_cdc93917efc28165, []int{49}
 }
 func (m *RunAsUserStrategyOptions) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1480,7 +1452,7 @@ var xxx_messageInfo_RunAsUserStrategyOptions proto.InternalMessageInfo
 func (m *RuntimeClassStrategyOptions) Reset()      { *m = RuntimeClassStrategyOptions{} }
 func (*RuntimeClassStrategyOptions) ProtoMessage() {}
 func (*RuntimeClassStrategyOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_cdc93917efc28165, []int{51}
+	return fileDescriptor_cdc93917efc28165, []int{50}
 }
 func (m *RuntimeClassStrategyOptions) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1508,7 +1480,7 @@ var xxx_messageInfo_RuntimeClassStrategyOptions proto.InternalMessageInfo
 func (m *SELinuxStrategyOptions) Reset()      { *m = SELinuxStrategyOptions{} }
 func (*SELinuxStrategyOptions) ProtoMessage() {}
 func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_cdc93917efc28165, []int{52}
+	return fileDescriptor_cdc93917efc28165, []int{51}
 }
 func (m *SELinuxStrategyOptions) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1536,7 +1508,7 @@ var xxx_messageInfo_SELinuxStrategyOptions proto.InternalMessageInfo
 func (m *Scale) Reset()      { *m = Scale{} }
 func (*Scale) ProtoMessage() {}
 func (*Scale) Descriptor() ([]byte, []int) {
-	return fileDescriptor_cdc93917efc28165, []int{53}
+	return fileDescriptor_cdc93917efc28165, []int{52}
 }
 func (m *Scale) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1564,7 +1536,7 @@ var xxx_messageInfo_Scale proto.InternalMessageInfo
 func (m *ScaleSpec) Reset()      { *m = ScaleSpec{} }
 func (*ScaleSpec) ProtoMessage() {}
 func (*ScaleSpec) Descriptor() ([]byte, []int) {
-	return fileDescriptor_cdc93917efc28165, []int{54}
+	return fileDescriptor_cdc93917efc28165, []int{53}
 }
 func (m *ScaleSpec) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1592,7 +1564,7 @@ var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo
 func (m *ScaleStatus) Reset()      { *m = ScaleStatus{} }
 func (*ScaleStatus) ProtoMessage() {}
 func (*ScaleStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_cdc93917efc28165, []int{55}
+	return fileDescriptor_cdc93917efc28165, []int{54}
 }
 func (m *ScaleStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1620,7 +1592,7 @@ var xxx_messageInfo_ScaleStatus proto.InternalMessageInfo
 func (m *SupplementalGroupsStrategyOptions) Reset()      { *m = SupplementalGroupsStrategyOptions{} }
 func (*SupplementalGroupsStrategyOptions) ProtoMessage() {}
 func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) {
-	return fileDescriptor_cdc93917efc28165, []int{56}
+	return fileDescriptor_cdc93917efc28165, []int{55}
 }
 func (m *SupplementalGroupsStrategyOptions) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -1692,7 +1664,6 @@ func init() {
 	proto.RegisterType((*ReplicaSetList)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetList")
 	proto.RegisterType((*ReplicaSetSpec)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetSpec")
 	proto.RegisterType((*ReplicaSetStatus)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetStatus")
-	proto.RegisterType((*ReplicationControllerDummy)(nil), "k8s.io.api.extensions.v1beta1.ReplicationControllerDummy")
 	proto.RegisterType((*RollbackConfig)(nil), "k8s.io.api.extensions.v1beta1.RollbackConfig")
 	proto.RegisterType((*RollingUpdateDaemonSet)(nil), "k8s.io.api.extensions.v1beta1.RollingUpdateDaemonSet")
 	proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.api.extensions.v1beta1.RollingUpdateDeployment")
@@ -1712,238 +1683,241 @@ func init() {
 }
 
 var fileDescriptor_cdc93917efc28165 = []byte{
-	// 3684 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0x4f, 0x6c, 0x1b, 0x47,
-	0x77, 0xf7, 0x92, 0x94, 0x48, 0x3d, 0xfd, 0x1f, 0xc9, 0x12, 0x3f, 0x3b, 0x16, 0xfd, 0x6d, 0x00,
-	0xd7, 0x49, 0x6d, 0x32, 0x76, 0x6c, 0x7f, 0xae, 0x8d, 0x7e, 0x89, 0x28, 0x59, 0xb6, 0x52, 0xfd,
-	0x61, 0x86, 0x92, 0x1b, 0x04, 0x4d, 0x9a, 0x15, 0x39, 0xa2, 0xd6, 0x5a, 0xee, 0x6e, 0x76, 0x87,
-	0x8a, 0x08, 0xf4, 0xd0, 0x43, 0x51, 0xa0, 0x40, 0x8b, 0xf6, 0x92, 0xb6, 0xc7, 0x06, 0x05, 0x7a,
-	0x6a, 0xd1, 0xde, 0xda, 0x43, 0x10, 0xa0, 0x40, 0x0a, 0x18, 0x45, 0x5a, 0xe4, 0xd6, 0x9c, 0x84,
-	0x46, 0x39, 0x15, 0x3d, 0xf5, 0x56, 0xf8, 0x50, 0x14, 0x33, 0x3b, 0xfb, 0x7f, 0x57, 0x5c, 0x29,
-	0xb6, 0xd0, 0x00, 0xbd, 0x89, 0xf3, 0xde, 0xfb, 0xbd, 0x37, 0x33, 0x6f, 0xde, 0x7b, 0x33, 0xfb,
-	0x04, 0x2b, 0xfb, 0xf7, 0xed, 0xaa, 0x6a, 0xd4, 0xf6, 0x7b, 0x3b, 0xc4, 0xd2, 0x09, 0x25, 0x76,
-	0xed, 0x80, 0xe8, 0x6d, 0xc3, 0xaa, 0x09, 0x82, 0x62, 0xaa, 0x35, 0x72, 0x48, 0x89, 0x6e, 0xab,
-	0x86, 0x6e, 0xd7, 0x0e, 0x6e, 0xed, 0x10, 0xaa, 0xdc, 0xaa, 0x75, 0x88, 0x4e, 0x2c, 0x85, 0x92,
-	0x76, 0xd5, 0xb4, 0x0c, 0x6a, 0xa0, 0x2b, 0x0e, 0x7b, 0x55, 0x31, 0xd5, 0xaa, 0xcf, 0x5e, 0x15,
-	0xec, 0x97, 0x6e, 0x76, 0x54, 0xba, 0xd7, 0xdb, 0xa9, 0xb6, 0x8c, 0x6e, 0xad, 0x63, 0x74, 0x8c,
-	0x1a, 0x97, 0xda, 0xe9, 0xed, 0xf2, 0x5f, 0xfc, 0x07, 0xff, 0xcb, 0x41, 0xbb, 0x24, 0x07, 0x94,
-	0xb7, 0x0c, 0x8b, 0xd4, 0x0e, 0x62, 0x1a, 0x2f, 0xdd, 0xf1, 0x79, 0xba, 0x4a, 0x6b, 0x4f, 0xd5,
-	0x89, 0xd5, 0xaf, 0x99, 0xfb, 0x1d, 0x36, 0x60, 0xd7, 0xba, 0x84, 0x2a, 0x49, 0x52, 0xb5, 0x34,
-	0x29, 0xab, 0xa7, 0x53, 0xb5, 0x4b, 0x62, 0x02, 0xf7, 0x06, 0x09, 0xd8, 0xad, 0x3d, 0xd2, 0x55,
-	0x62, 0x72, 0x6f, 0xa7, 0xc9, 0xf5, 0xa8, 0xaa, 0xd5, 0x54, 0x9d, 0xda, 0xd4, 0x8a, 0x0a, 0xc9,
-	0x77, 0x60, 0x6a, 0x51, 0xd3, 0x8c, 0xcf, 0x48, 0x7b, 0xa9, 0xb9, 0xba, 0x6c, 0xa9, 0x07, 0xc4,
-	0x42, 0x57, 0xa1, 0xa0, 0x2b, 0x5d, 0x52, 0x96, 0xae, 0x4a, 0xd7, 0x47, 0xea, 0x63, 0xcf, 0x8f,
-	0x2a, 0x17, 0x8e, 0x8f, 0x2a, 0x85, 0x0d, 0xa5, 0x4b, 0x30, 0xa7, 0xc8, 0x0f, 0x61, 0x5a, 0x48,
-	0xad, 0x68, 0xe4, 0xf0, 0xa9, 0xa1, 0xf5, 0xba, 0x04, 0x5d, 0x83, 0xe1, 0x36, 0x07, 0x10, 0x82,
-	0x13, 0x42, 0x70, 0xd8, 0x81, 0xc5, 0x82, 0x2a, 0xdb, 0x30, 0x29, 0x84, 0x9f, 0x18, 0x36, 0x6d,
-	0x28, 0x74, 0x0f, 0xdd, 0x06, 0x30, 0x15, 0xba, 0xd7, 0xb0, 0xc8, 0xae, 0x7a, 0x28, 0xc4, 0x91,
-	0x10, 0x87, 0x86, 0x47, 0xc1, 0x01, 0x2e, 0x74, 0x03, 0x4a, 0x16, 0x51, 0xda, 0x9b, 0xba, 0xd6,
-	0x2f, 0xe7, 0xae, 0x4a, 0xd7, 0x4b, 0xf5, 0x29, 0x21, 0x51, 0xc2, 0x62, 0x1c, 0x7b, 0x1c, 0xf2,
-	0xe7, 0x39, 0x18, 0x59, 0x56, 0x48, 0xd7, 0xd0, 0x9b, 0x84, 0xa2, 0x4f, 0xa0, 0xc4, 0xb6, 0xab,
-	0xad, 0x50, 0x85, 0x6b, 0x1b, 0xbd, 0xfd, 0x56, 0xd5, 0x77, 0x27, 0x6f, 0xf5, 0xaa, 0xe6, 0x7e,
-	0x87, 0x0d, 0xd8, 0x55, 0xc6, 0x5d, 0x3d, 0xb8, 0x55, 0xdd, 0xdc, 0x79, 0x46, 0x5a, 0x74, 0x9d,
-	0x50, 0xc5, 0xb7, 0xcf, 0x1f, 0xc3, 0x1e, 0x2a, 0xda, 0x80, 0x82, 0x6d, 0x92, 0x16, 0xb7, 0x6c,
-	0xf4, 0xf6, 0x8d, 0xea, 0x89, 0xce, 0x5a, 0xf5, 0x2c, 0x6b, 0x9a, 0xa4, 0xe5, 0xaf, 0x38, 0xfb,
-	0x85, 0x39, 0x0e, 0x7a, 0x0a, 0xc3, 0x36, 0x55, 0x68, 0xcf, 0x2e, 0xe7, 0x39, 0x62, 0x35, 0x33,
-	0x22, 0x97, 0xf2, 0x37, 0xc3, 0xf9, 0x8d, 0x05, 0x9a, 0xfc, 0x1f, 0x39, 0x40, 0x1e, 0xef, 0x92,
-	0xa1, 0xb7, 0x55, 0xaa, 0x1a, 0x3a, 0x7a, 0x00, 0x05, 0xda, 0x37, 0x5d, 0x17, 0xb8, 0xe6, 0x1a,
-	0xb4, 0xd5, 0x37, 0xc9, 0x8b, 0xa3, 0xca, 0x5c, 0x5c, 0x82, 0x51, 0x30, 0x97, 0x41, 0x6b, 0x9e,
-	0xa9, 0x39, 0x2e, 0x7d, 0x27, 0xac, 0xfa, 0xc5, 0x51, 0x25, 0xe1, 0xb0, 0x55, 0x3d, 0xa4, 0xb0,
-	0x81, 0xe8, 0x00, 0x90, 0xa6, 0xd8, 0x74, 0xcb, 0x52, 0x74, 0xdb, 0xd1, 0xa4, 0x76, 0x89, 0x58,
-	0x84, 0x37, 0xb3, 0x6d, 0x1a, 0x93, 0xa8, 0x5f, 0x12, 0x56, 0xa0, 0xb5, 0x18, 0x1a, 0x4e, 0xd0,
-	0xc0, 0xbc, 0xd9, 0x22, 0x8a, 0x6d, 0xe8, 0xe5, 0x42, 0xd8, 0x9b, 0x31, 0x1f, 0xc5, 0x82, 0x8a,
-	0xde, 0x80, 0x62, 0x97, 0xd8, 0xb6, 0xd2, 0x21, 0xe5, 0x21, 0xce, 0x38, 0x29, 0x18, 0x8b, 0xeb,
-	0xce, 0x30, 0x76, 0xe9, 0xf2, 0x97, 0x12, 0x8c, 0x7b, 0x2b, 0xb7, 0xa6, 0xda, 0x14, 0xfd, 0x56,
-	0xcc, 0x0f, 0xab, 0xd9, 0xa6, 0xc4, 0xa4, 0xb9, 0x17, 0x7a, 0x3e, 0xef, 0x8e, 0x04, 0x7c, 0x70,
-	0x1d, 0x86, 0x54, 0x4a, 0xba, 0x6c, 0x1f, 0xf2, 0xd7, 0x47, 0x6f, 0x5f, 0xcf, 0xea, 0x32, 0xf5,
-	0x71, 0x01, 0x3a, 0xb4, 0xca, 0xc4, 0xb1, 0x83, 0x22, 0xff, 0x69, 0x21, 0x60, 0x3e, 0x73, 0x4d,
-	0xf4, 0x11, 0x94, 0x6c, 0xa2, 0x91, 0x16, 0x35, 0x2c, 0x61, 0xfe, 0xdb, 0x19, 0xcd, 0x57, 0x76,
-	0x88, 0xd6, 0x14, 0xa2, 0xf5, 0x31, 0x66, 0xbf, 0xfb, 0x0b, 0x7b, 0x90, 0xe8, 0x7d, 0x28, 0x51,
-	0xd2, 0x35, 0x35, 0x85, 0x12, 0x71, 0x8e, 0x5e, 0x0f, 0x4e, 0x81, 0x79, 0x0e, 0x03, 0x6b, 0x18,
-	0xed, 0x2d, 0xc1, 0xc6, 0x8f, 0x8f, 0xb7, 0x24, 0xee, 0x28, 0xf6, 0x60, 0xd0, 0x01, 0x4c, 0xf4,
-	0xcc, 0x36, 0xe3, 0xa4, 0x2c, 0x0a, 0x76, 0xfa, 0xc2, 0x93, 0xee, 0x65, 0x5d, 0x9b, 0xed, 0x90,
-	0x74, 0x7d, 0x4e, 0xe8, 0x9a, 0x08, 0x8f, 0xe3, 0x88, 0x16, 0xb4, 0x08, 0x93, 0x5d, 0x55, 0x67,
-	0x71, 0xa9, 0xdf, 0x24, 0x2d, 0x43, 0x6f, 0xdb, 0xdc, 0xad, 0x86, 0xea, 0xf3, 0x02, 0x60, 0x72,
-	0x3d, 0x4c, 0xc6, 0x51, 0x7e, 0xf4, 0x1e, 0x20, 0x77, 0x1a, 0x8f, 0x9d, 0x20, 0xae, 0x1a, 0x3a,
-	0xf7, 0xb9, 0xbc, 0xef, 0xdc, 0x5b, 0x31, 0x0e, 0x9c, 0x20, 0x85, 0xd6, 0x60, 0xd6, 0x22, 0x07,
-	0x2a, 0x9b, 0xe3, 0x13, 0xd5, 0xa6, 0x86, 0xd5, 0x5f, 0x53, 0xbb, 0x2a, 0x2d, 0x0f, 0x73, 0x9b,
-	0xca, 0xc7, 0x47, 0x95, 0x59, 0x9c, 0x40, 0xc7, 0x89, 0x52, 0xf2, 0x9f, 0x0d, 0xc3, 0x64, 0x24,
-	0xde, 0xa0, 0xa7, 0x30, 0xd7, 0xea, 0x59, 0x16, 0xd1, 0xe9, 0x46, 0xaf, 0xbb, 0x43, 0xac, 0x66,
-	0x6b, 0x8f, 0xb4, 0x7b, 0x1a, 0x69, 0x73, 0x47, 0x19, 0xaa, 0x2f, 0x08, 0x8b, 0xe7, 0x96, 0x12,
-	0xb9, 0x70, 0x8a, 0x34, 0x5b, 0x05, 0x9d, 0x0f, 0xad, 0xab, 0xb6, 0xed, 0x61, 0xe6, 0x38, 0xa6,
-	0xb7, 0x0a, 0x1b, 0x31, 0x0e, 0x9c, 0x20, 0xc5, 0x6c, 0x6c, 0x13, 0x5b, 0xb5, 0x48, 0x3b, 0x6a,
-	0x63, 0x3e, 0x6c, 0xe3, 0x72, 0x22, 0x17, 0x4e, 0x91, 0x46, 0x77, 0x61, 0xd4, 0xd1, 0xc6, 0xf7,
-	0x4f, 0x6c, 0xf4, 0x8c, 0x00, 0x1b, 0xdd, 0xf0, 0x49, 0x38, 0xc8, 0xc7, 0xa6, 0x66, 0xec, 0xd8,
-	0xc4, 0x3a, 0x20, 0xed, 0xf4, 0x0d, 0xde, 0x8c, 0x71, 0xe0, 0x04, 0x29, 0x36, 0x35, 0xc7, 0x03,
-	0x63, 0x53, 0x1b, 0x0e, 0x4f, 0x6d, 0x3b, 0x91, 0x0b, 0xa7, 0x48, 0x33, 0x3f, 0x76, 0x4c, 0x5e,
-	0x3c, 0x50, 0x54, 0x4d, 0xd9, 0xd1, 0x48, 0xb9, 0x18, 0xf6, 0xe3, 0x8d, 0x30, 0x19, 0x47, 0xf9,
-	0xd1, 0x63, 0x98, 0x76, 0x86, 0xb6, 0x75, 0xc5, 0x03, 0x29, 0x71, 0x90, 0x9f, 0x09, 0x90, 0xe9,
-	0x8d, 0x28, 0x03, 0x8e, 0xcb, 0xa0, 0x07, 0x30, 0xd1, 0x32, 0x34, 0x8d, 0xfb, 0xe3, 0x92, 0xd1,
-	0xd3, 0x69, 0x79, 0x84, 0xa3, 0x20, 0x76, 0x1e, 0x97, 0x42, 0x14, 0x1c, 0xe1, 0x44, 0x04, 0xa0,
-	0xe5, 0x26, 0x1c, 0xbb, 0x0c, 0x3c, 0x3e, 0xde, 0xca, 0x1a, 0x03, 0xbc, 0x54, 0xe5, 0xd7, 0x00,
-	0xde, 0x90, 0x8d, 0x03, 0xc0, 0xf2, 0x3f, 0x4b, 0x30, 0x9f, 0x12, 0x3a, 0xd0, 0x3b, 0xa1, 0x14,
-	0xfb, 0xab, 0x91, 0x14, 0x7b, 0x39, 0x45, 0x2c, 0x90, 0x67, 0x75, 0x18, 0xb7, 0xd8, 0xac, 0xf4,
-	0x8e, 0xc3, 0x22, 0x62, 0xe4, 0xdd, 0x01, 0xd3, 0xc0, 0x41, 0x19, 0x3f, 0xe6, 0x4f, 0x1f, 0x1f,
-	0x55, 0xc6, 0x43, 0x34, 0x1c, 0x86, 0x97, 0xff, 0x3c, 0x07, 0xb0, 0x4c, 0x4c, 0xcd, 0xe8, 0x77,
-	0x89, 0x7e, 0x1e, 0x35, 0xd4, 0x66, 0xa8, 0x86, 0xba, 0x39, 0x68, 0x7b, 0x3c, 0xd3, 0x52, 0x8b,
-	0xa8, 0xdf, 0x8c, 0x14, 0x51, 0xb5, 0xec, 0x90, 0x27, 0x57, 0x51, 0xff, 0x96, 0x87, 0x19, 0x9f,
-	0xd9, 0x2f, 0xa3, 0x1e, 0x86, 0xf6, 0xf8, 0x57, 0x22, 0x7b, 0x3c, 0x9f, 0x20, 0xf2, 0xca, 0xea,
-	0xa8, 0x67, 0x30, 0xc1, 0xaa, 0x1c, 0x67, 0x2f, 0x79, 0x0d, 0x35, 0x7c, 0xea, 0x1a, 0xca, 0xcb,
-	0x76, 0x6b, 0x21, 0x24, 0x1c, 0x41, 0x4e, 0xa9, 0xd9, 0x8a, 0x3f, 0xc5, 0x9a, 0xed, 0x2b, 0x09,
-	0x26, 0xfc, 0x6d, 0x3a, 0x87, 0xa2, 0x6d, 0x23, 0x5c, 0xb4, 0xbd, 0x91, 0xd9, 0x45, 0x53, 0xaa,
-	0xb6, 0xff, 0x66, 0x05, 0xbe, 0xc7, 0xc4, 0x0e, 0xf8, 0x8e, 0xd2, 0xda, 0x1f, 0x7c, 0xc7, 0x43,
-	0x9f, 0x4b, 0x80, 0x44, 0x16, 0x58, 0xd4, 0x75, 0x83, 0x2a, 0x4e, 0xac, 0x74, 0xcc, 0x5a, 0xcd,
-	0x6c, 0x96, 0xab, 0xb1, 0xba, 0x1d, 0xc3, 0x7a, 0xa4, 0x53, 0xab, 0xef, 0x6f, 0x72, 0x9c, 0x01,
-	0x27, 0x18, 0x80, 0x14, 0x00, 0x4b, 0x60, 0x6e, 0x19, 0xe2, 0x20, 0xdf, 0xcc, 0x10, 0xf3, 0x98,
-	0xc0, 0x92, 0xa1, 0xef, 0xaa, 0x1d, 0x3f, 0xec, 0x60, 0x0f, 0x08, 0x07, 0x40, 0x2f, 0x3d, 0x82,
-	0xf9, 0x14, 0x6b, 0xd1, 0x14, 0xe4, 0xf7, 0x49, 0xdf, 0x59, 0x36, 0xcc, 0xfe, 0x44, 0xb3, 0x30,
-	0x74, 0xa0, 0x68, 0x3d, 0x27, 0xfc, 0x8e, 0x60, 0xe7, 0xc7, 0x83, 0xdc, 0x7d, 0x49, 0xfe, 0x72,
-	0x28, 0xe8, 0x3b, 0xbc, 0x62, 0xbe, 0xce, 0x2e, 0xad, 0xa6, 0xa6, 0xb6, 0x14, 0x5b, 0x14, 0x42,
-	0x63, 0xce, 0x85, 0xd5, 0x19, 0xc3, 0x1e, 0x35, 0x54, 0x5b, 0xe7, 0x5e, 0x6d, 0x6d, 0x9d, 0x7f,
-	0x39, 0xb5, 0xf5, 0x6f, 0x43, 0xc9, 0x76, 0xab, 0xea, 0x02, 0x87, 0xbc, 0x75, 0x8a, 0xf8, 0x2a,
-	0x0a, 0x6a, 0x4f, 0x81, 0x57, 0x4a, 0x7b, 0xa0, 0x49, 0x45, 0xf4, 0xd0, 0x29, 0x8b, 0xe8, 0x97,
-	0x5a, 0xf8, 0xb2, 0x78, 0x63, 0x2a, 0x3d, 0x9b, 0xb4, 0x79, 0x6c, 0x2b, 0xf9, 0xf1, 0xa6, 0xc1,
-	0x47, 0xb1, 0xa0, 0xa2, 0x8f, 0x42, 0x2e, 0x5b, 0x3a, 0x8b, 0xcb, 0x4e, 0xa4, 0xbb, 0x2b, 0xda,
-	0x86, 0x79, 0xd3, 0x32, 0x3a, 0x16, 0xb1, 0xed, 0x65, 0xa2, 0xb4, 0x35, 0x55, 0x27, 0xee, 0xfa,
-	0x38, 0x15, 0xd1, 0xe5, 0xe3, 0xa3, 0xca, 0x7c, 0x23, 0x99, 0x05, 0xa7, 0xc9, 0xca, 0xcf, 0x0b,
-	0x30, 0x15, 0xcd, 0x80, 0x29, 0x45, 0xaa, 0x74, 0xa6, 0x22, 0xf5, 0x46, 0xe0, 0x30, 0x38, 0x15,
-	0x7c, 0xe0, 0x05, 0x27, 0x76, 0x20, 0x16, 0x61, 0x52, 0x44, 0x03, 0x97, 0x28, 0xca, 0x74, 0x6f,
-	0xf7, 0xb7, 0xc3, 0x64, 0x1c, 0xe5, 0x47, 0x0f, 0x61, 0xdc, 0xe2, 0x75, 0xb7, 0x0b, 0xe0, 0xd4,
-	0xae, 0x17, 0x05, 0xc0, 0x38, 0x0e, 0x12, 0x71, 0x98, 0x97, 0xd5, 0xad, 0x7e, 0x39, 0xea, 0x02,
-	0x14, 0xc2, 0x75, 0xeb, 0x62, 0x94, 0x01, 0xc7, 0x65, 0xd0, 0x3a, 0xcc, 0xf4, 0xf4, 0x38, 0x94,
-	0xe3, 0xca, 0x97, 0x05, 0xd4, 0xcc, 0x76, 0x9c, 0x05, 0x27, 0xc9, 0xa1, 0xdd, 0x50, 0x29, 0x3b,
-	0xcc, 0xc3, 0xf3, 0xed, 0xcc, 0x07, 0x2f, 0x73, 0x2d, 0x9b, 0x50, 0x6e, 0x97, 0xb2, 0x96, 0xdb,
-	0xf2, 0x3f, 0x4a, 0xc1, 0x24, 0xe4, 0x95, 0xc0, 0x83, 0x5e, 0x99, 0x62, 0x12, 0x81, 0xea, 0xc8,
-	0x48, 0xae, 0x7e, 0xef, 0x9d, 0xaa, 0xfa, 0xf5, 0x93, 0xe7, 0xe0, 0xf2, 0xf7, 0x0b, 0x09, 0xe6,
-	0x56, 0x9a, 0x8f, 0x2d, 0xa3, 0x67, 0xba, 0xe6, 0x6c, 0x9a, 0xce, 0xd2, 0xfc, 0x02, 0x0a, 0x56,
-	0x4f, 0x73, 0xe7, 0xf1, 0xba, 0x3b, 0x0f, 0xdc, 0xd3, 0xd8, 0x3c, 0x66, 0x22, 0x52, 0xce, 0x24,
-	0x98, 0x00, 0xda, 0x80, 0x61, 0x4b, 0xd1, 0x3b, 0xc4, 0x4d, 0xab, 0xd7, 0x06, 0x58, 0xbf, 0xba,
-	0x8c, 0x19, 0x7b, 0xa0, 0xb0, 0xe1, 0xd2, 0x58, 0xa0, 0xc8, 0x7f, 0x24, 0xc1, 0xe4, 0x93, 0xad,
-	0xad, 0xc6, 0xaa, 0xce, 0x4f, 0x34, 0x7f, 0x5b, 0xbd, 0x0a, 0x05, 0x53, 0xa1, 0x7b, 0xd1, 0x4c,
-	0xcf, 0x68, 0x98, 0x53, 0xd0, 0x07, 0x50, 0x64, 0x91, 0x84, 0xe8, 0xed, 0x8c, 0xa5, 0xb6, 0x80,
-	0xaf, 0x3b, 0x42, 0x7e, 0xf5, 0x24, 0x06, 0xb0, 0x0b, 0x27, 0xef, 0xc3, 0x6c, 0xc0, 0x1c, 0xb6,
-	0x1e, 0x4f, 0x59, 0x76, 0x44, 0x4d, 0x18, 0x62, 0x9a, 0x59, 0x0e, 0xcc, 0x67, 0x78, 0xcc, 0x8c,
-	0x4c, 0xc9, 0xaf, 0x74, 0xd8, 0x2f, 0x1b, 0x3b, 0x58, 0xf2, 0x3a, 0x8c, 0xf3, 0x07, 0x65, 0xc3,
-	0xa2, 0x7c, 0x59, 0xd0, 0x15, 0xc8, 0x77, 0x55, 0x5d, 0xe4, 0xd9, 0x51, 0x21, 0x93, 0x67, 0x39,
-	0x82, 0x8d, 0x73, 0xb2, 0x72, 0x28, 0x22, 0x8f, 0x4f, 0x56, 0x0e, 0x31, 0x1b, 0x97, 0x1f, 0x43,
-	0x51, 0x2c, 0x77, 0x10, 0x28, 0x7f, 0x32, 0x50, 0x3e, 0x01, 0x68, 0x13, 0x8a, 0xab, 0x8d, 0xba,
-	0x66, 0x38, 0x55, 0x57, 0x4b, 0x6d, 0x5b, 0xd1, 0xbd, 0x58, 0x5a, 0x5d, 0xc6, 0x98, 0x53, 0x90,
-	0x0c, 0xc3, 0xe4, 0xb0, 0x45, 0x4c, 0xca, 0x3d, 0x62, 0xa4, 0x0e, 0x6c, 0x97, 0x1f, 0xf1, 0x11,
-	0x2c, 0x28, 0xf2, 0x1f, 0xe7, 0xa0, 0x28, 0x96, 0xe3, 0x1c, 0x6e, 0x61, 0x6b, 0xa1, 0x5b, 0xd8,
-	0x9b, 0xd9, 0x5c, 0x23, 0xf5, 0x0a, 0xb6, 0x15, 0xb9, 0x82, 0xdd, 0xc8, 0x88, 0x77, 0xf2, 0xfd,
-	0xeb, 0xef, 0x24, 0x98, 0x08, 0x3b, 0x25, 0xba, 0x0b, 0xa3, 0x2c, 0xe1, 0xa8, 0x2d, 0xb2, 0xe1,
-	0xd7, 0xb9, 0xde, 0x23, 0x4c, 0xd3, 0x27, 0xe1, 0x20, 0x1f, 0xea, 0x78, 0x62, 0xcc, 0x8f, 0xc4,
-	0xa4, 0xd3, 0x97, 0xb4, 0x47, 0x55, 0xad, 0xea, 0x7c, 0x5a, 0xa9, 0xae, 0xea, 0x74, 0xd3, 0x6a,
-	0x52, 0x4b, 0xd5, 0x3b, 0x31, 0x45, 0xdc, 0x29, 0x83, 0xc8, 0xf2, 0x3f, 0x48, 0x30, 0x2a, 0x4c,
-	0x3e, 0x87, 0x5b, 0xc5, 0x6f, 0x84, 0x6f, 0x15, 0xd7, 0x32, 0x1e, 0xf0, 0xe4, 0x2b, 0xc5, 0x5f,
-	0xf9, 0xa6, 0xb3, 0x23, 0xcd, 0xbc, 0x7a, 0xcf, 0xb0, 0x69, 0xd4, 0xab, 0xd9, 0x61, 0xc4, 0x9c,
-	0x82, 0x7a, 0x30, 0xa5, 0x46, 0x62, 0x80, 0x58, 0xda, 0x5a, 0x36, 0x4b, 0x3c, 0xb1, 0x7a, 0x59,
-	0xc0, 0x4f, 0x45, 0x29, 0x38, 0xa6, 0x42, 0x26, 0x10, 0xe3, 0x42, 0xef, 0x43, 0x61, 0x8f, 0x52,
-	0x33, 0xe1, 0xbd, 0x7a, 0x40, 0xe4, 0xf1, 0x4d, 0x28, 0xf1, 0xd9, 0x6d, 0x6d, 0x35, 0x30, 0x87,
-	0x92, 0xff, 0xc7, 0x5f, 0x8f, 0xa6, 0xe3, 0xe3, 0x5e, 0x3c, 0x95, 0xce, 0x12, 0x4f, 0x47, 0x93,
-	0x62, 0x29, 0x7a, 0x02, 0x79, 0xaa, 0x65, 0xbd, 0x16, 0x0a, 0xc4, 0xad, 0xb5, 0xa6, 0x1f, 0x90,
-	0xb6, 0xd6, 0x9a, 0x98, 0x41, 0xa0, 0x4d, 0x18, 0x62, 0xd9, 0x87, 0x1d, 0xc1, 0x7c, 0xf6, 0x23,
-	0xcd, 0xe6, 0xef, 0x3b, 0x04, 0xfb, 0x65, 0x63, 0x07, 0x47, 0xfe, 0x14, 0xc6, 0x43, 0xe7, 0x14,
-	0x7d, 0x02, 0x63, 0x9a, 0xa1, 0xb4, 0xeb, 0x8a, 0xa6, 0xe8, 0x2d, 0xe2, 0x7e, 0x1c, 0xb8, 0x96,
-	0x74, 0xc3, 0x58, 0x0b, 0xf0, 0x89, 0x53, 0x3e, 0x2b, 0x94, 0x8c, 0x05, 0x69, 0x38, 0x84, 0x28,
-	0x2b, 0x00, 0xfe, 0x1c, 0x51, 0x05, 0x86, 0x98, 0x9f, 0x39, 0xf9, 0x64, 0xa4, 0x3e, 0xc2, 0x2c,
-	0x64, 0xee, 0x67, 0x63, 0x67, 0x1c, 0xdd, 0x06, 0xb0, 0x49, 0xcb, 0x22, 0x94, 0x07, 0x83, 0x5c,
-	0xf8, 0x03, 0x63, 0xd3, 0xa3, 0xe0, 0x00, 0x97, 0xfc, 0x4f, 0x12, 0x8c, 0x6f, 0x10, 0xfa, 0x99,
-	0x61, 0xed, 0x37, 0x0c, 0x4d, 0x6d, 0xf5, 0xcf, 0x21, 0xd8, 0xe2, 0x50, 0xb0, 0x7d, 0x6b, 0xc0,
-	0xce, 0x84, 0xac, 0x4b, 0x0b, 0xb9, 0xf2, 0x57, 0x12, 0xcc, 0x87, 0x38, 0x1f, 0xf9, 0x47, 0x77,
-	0x1b, 0x86, 0x4c, 0xc3, 0xa2, 0x6e, 0x22, 0x3e, 0x95, 0x42, 0x16, 0xc6, 0x02, 0xa9, 0x98, 0xc1,
-	0x60, 0x07, 0x0d, 0xad, 0x41, 0x8e, 0x1a, 0xc2, 0x55, 0x4f, 0x87, 0x49, 0x88, 0x55, 0x07, 0x81,
-	0x99, 0xdb, 0x32, 0x70, 0x8e, 0x1a, 0x6c, 0x23, 0xca, 0x21, 0xae, 0x60, 0xf0, 0x79, 0x45, 0x33,
-	0xc0, 0x50, 0xd8, 0xb5, 0x8c, 0xee, 0x99, 0xe7, 0xe0, 0x6d, 0xc4, 0x8a, 0x65, 0x74, 0x31, 0xc7,
-	0x92, 0xbf, 0x96, 0x60, 0x3a, 0xc4, 0x79, 0x0e, 0x81, 0xff, 0xfd, 0x70, 0xe0, 0xbf, 0x71, 0x9a,
-	0x89, 0xa4, 0x84, 0xff, 0xaf, 0x73, 0x91, 0x69, 0xb0, 0x09, 0xa3, 0x5d, 0x18, 0x35, 0x8d, 0x76,
-	0xf3, 0x25, 0x7c, 0x0e, 0x9c, 0x64, 0x79, 0xb3, 0xe1, 0x63, 0xe1, 0x20, 0x30, 0x3a, 0x84, 0x69,
-	0x5d, 0xe9, 0x12, 0xdb, 0x54, 0x5a, 0xa4, 0xf9, 0x12, 0x1e, 0x48, 0x2e, 0xf2, 0xef, 0x0d, 0x51,
-	0x44, 0x1c, 0x57, 0x82, 0xd6, 0xa1, 0xa8, 0x9a, 0xbc, 0x8e, 0x13, 0xb5, 0xcb, 0xc0, 0x2c, 0xea,
-	0x54, 0x7d, 0x4e, 0x3c, 0x17, 0x3f, 0xb0, 0x8b, 0x21, 0xff, 0x75, 0xd4, 0x1b, 0x98, 0xff, 0xa1,
-	0xc7, 0x50, 0xe2, 0x8d, 0x19, 0x2d, 0x43, 0x73, 0xbf, 0x0c, 0xb0, 0x9d, 0x6d, 0x88, 0xb1, 0x17,
-	0x47, 0x95, 0xcb, 0x09, 0x8f, 0xbe, 0x2e, 0x19, 0x7b, 0xc2, 0x68, 0x03, 0x0a, 0xe6, 0x8f, 0xa9,
-	0x60, 0x78, 0x92, 0xe3, 0x65, 0x0b, 0xc7, 0x91, 0x7f, 0x2f, 0x1f, 0x31, 0x97, 0xa7, 0xba, 0x67,
-	0x2f, 0x6d, 0xd7, 0xbd, 0x8a, 0x29, 0x75, 0xe7, 0x77, 0xa0, 0x28, 0x32, 0xbc, 0x70, 0xe6, 0x5f,
-	0x9c, 0xc6, 0x99, 0x83, 0x59, 0xcc, 0xbb, 0xb0, 0xb8, 0x83, 0x2e, 0x30, 0xfa, 0x18, 0x86, 0x89,
-	0xa3, 0xc2, 0xc9, 0x8d, 0xf7, 0x4e, 0xa3, 0xc2, 0x8f, 0xab, 0x7e, 0xa1, 0x2a, 0xc6, 0x04, 0x2a,
-	0x7a, 0x87, 0xad, 0x17, 0xe3, 0x65, 0x97, 0x40, 0xbb, 0x5c, 0xe0, 0xe9, 0xea, 0x8a, 0x33, 0x6d,
-	0x6f, 0xf8, 0xc5, 0x51, 0x05, 0xfc, 0x9f, 0x38, 0x28, 0x21, 0xff, 0x8b, 0x04, 0xd3, 0x7c, 0x85,
-	0x5a, 0x3d, 0x4b, 0xa5, 0xfd, 0x73, 0x4b, 0x4c, 0x4f, 0x43, 0x89, 0xe9, 0xce, 0x80, 0x65, 0x89,
-	0x59, 0x98, 0x9a, 0x9c, 0xbe, 0x91, 0xe0, 0x62, 0x8c, 0xfb, 0x1c, 0xe2, 0xe2, 0x76, 0x38, 0x2e,
-	0xbe, 0x75, 0xda, 0x09, 0xa5, 0xc4, 0xc6, 0xff, 0x9a, 0x4e, 0x98, 0x0e, 0x3f, 0x29, 0xb7, 0x01,
-	0x4c, 0x4b, 0x3d, 0x50, 0x35, 0xd2, 0x11, 0x1f, 0xc1, 0x4b, 0x81, 0x16, 0x27, 0x8f, 0x82, 0x03,
-	0x5c, 0xc8, 0x86, 0xb9, 0x36, 0xd9, 0x55, 0x7a, 0x1a, 0x5d, 0x6c, 0xb7, 0x97, 0x14, 0x53, 0xd9,
-	0x51, 0x35, 0x95, 0xaa, 0xe2, 0xb9, 0x60, 0xa4, 0xfe, 0xd0, 0xf9, 0x38, 0x9d, 0xc4, 0xf1, 0xe2,
-	0xa8, 0x72, 0x25, 0xe9, 0xeb, 0x90, 0xcb, 0xd2, 0xc7, 0x29, 0xd0, 0xa8, 0x0f, 0x65, 0x8b, 0x7c,
-	0xda, 0x53, 0x2d, 0xd2, 0x5e, 0xb6, 0x0c, 0x33, 0xa4, 0x36, 0xcf, 0xd5, 0xfe, 0xfa, 0xf1, 0x51,
-	0xa5, 0x8c, 0x53, 0x78, 0x06, 0x2b, 0x4e, 0x85, 0x47, 0xcf, 0x60, 0x46, 0x11, 0xcd, 0x68, 0x41,
-	0xad, 0xce, 0x29, 0xb9, 0x7f, 0x7c, 0x54, 0x99, 0x59, 0x8c, 0x93, 0x07, 0x2b, 0x4c, 0x02, 0x45,
-	0x35, 0x28, 0x1e, 0xf0, 0xbe, 0x35, 0xbb, 0x3c, 0xc4, 0xf1, 0x59, 0x22, 0x28, 0x3a, 0xad, 0x6c,
-	0x0c, 0x73, 0x78, 0xa5, 0xc9, 0x4f, 0x9f, 0xcb, 0xc5, 0x2e, 0x94, 0xac, 0x96, 0x14, 0x27, 0x9e,
-	0xbf, 0x18, 0x97, 0xfc, 0xa8, 0xf5, 0xc4, 0x27, 0xe1, 0x20, 0x1f, 0xfa, 0x08, 0x46, 0xf6, 0xc4,
-	0xab, 0x84, 0x5d, 0x2e, 0x66, 0x4a, 0xc2, 0xa1, 0x57, 0x8c, 0xfa, 0xb4, 0x50, 0x31, 0xe2, 0x0e,
-	0xdb, 0xd8, 0x47, 0x44, 0x6f, 0x40, 0x91, 0xff, 0x58, 0x5d, 0xe6, 0xcf, 0x71, 0x25, 0x3f, 0xb6,
-	0x3d, 0x71, 0x86, 0xb1, 0x4b, 0x77, 0x59, 0x57, 0x1b, 0x4b, 0xfc, 0x59, 0x38, 0xc2, 0xba, 0xda,
-	0x58, 0xc2, 0x2e, 0x1d, 0x7d, 0x02, 0x45, 0x9b, 0xac, 0xa9, 0x7a, 0xef, 0xb0, 0x0c, 0x99, 0x3e,
-	0x2a, 0x37, 0x1f, 0x71, 0xee, 0xc8, 0xc3, 0x98, 0xaf, 0x41, 0xd0, 0xb1, 0x0b, 0x8b, 0xf6, 0x60,
-	0xc4, 0xea, 0xe9, 0x8b, 0xf6, 0xb6, 0x4d, 0xac, 0xf2, 0x28, 0xd7, 0x31, 0x28, 0x9c, 0x63, 0x97,
-	0x3f, 0xaa, 0xc5, 0x5b, 0x21, 0x8f, 0x03, 0xfb, 0xe0, 0x68, 0x0f, 0x80, 0xff, 0xe0, 0x6f, 0x70,
-	0xe5, 0x39, 0xae, 0xea, 0x7e, 0x16, 0x55, 0x49, 0x4f, 0x7d, 0xe2, 0x1d, 0xde, 0x23, 0xe3, 0x00,
-	0x36, 0xfa, 0x43, 0x09, 0x90, 0xdd, 0x33, 0x4d, 0x8d, 0x74, 0x89, 0x4e, 0x15, 0x8d, 0x8f, 0xda,
-	0xe5, 0x31, 0xae, 0xf2, 0xdd, 0x41, 0x2b, 0x18, 0x13, 0x8c, 0xaa, 0xf6, 0x9e, 0xd7, 0xe3, 0xac,
-	0x38, 0x41, 0x2f, 0xdb, 0xc4, 0x5d, 0x31, 0xeb, 0xf1, 0x4c, 0x9b, 0x98, 0xfc, 0xba, 0xe9, 0x6f,
-	0xa2, 0xa0, 0x63, 0x17, 0x16, 0x3d, 0x85, 0x39, 0xb7, 0xc1, 0x12, 0x1b, 0x06, 0x5d, 0x51, 0x35,
-	0x62, 0xf7, 0x6d, 0x4a, 0xba, 0xe5, 0x09, 0xee, 0x60, 0x5e, 0x97, 0x09, 0x4e, 0xe4, 0xc2, 0x29,
-	0xd2, 0xa8, 0x0b, 0x15, 0x37, 0x38, 0xb1, 0x93, 0xeb, 0x45, 0xc7, 0x47, 0x76, 0x4b, 0xd1, 0x9c,
-	0x2f, 0x0e, 0x93, 0x5c, 0xc1, 0xeb, 0xc7, 0x47, 0x95, 0xca, 0xf2, 0xc9, 0xac, 0x78, 0x10, 0x16,
-	0xfa, 0x00, 0xca, 0x4a, 0x9a, 0x9e, 0x29, 0xae, 0xe7, 0x35, 0x16, 0xf1, 0x52, 0x15, 0xa4, 0x4a,
-	0x23, 0x0a, 0x53, 0x4a, 0xb8, 0xd5, 0xd5, 0x2e, 0x4f, 0x67, 0x7a, 0xf2, 0x8c, 0x74, 0xc8, 0xfa,
-	0xcf, 0x1e, 0x11, 0x82, 0x8d, 0x63, 0x1a, 0xd0, 0xef, 0x00, 0x52, 0xa2, 0xdd, 0xb9, 0x76, 0x19,
-	0x65, 0x4a, 0x74, 0xb1, 0xb6, 0x5e, 0xdf, 0xed, 0x62, 0x24, 0x1b, 0x27, 0xe8, 0x61, 0x05, 0xba,
-	0x12, 0xe9, 0x28, 0xb6, 0xcb, 0xf3, 0x5c, 0x79, 0x2d, 0x9b, 0x72, 0x4f, 0x2e, 0xf0, 0x61, 0x25,
-	0x8a, 0x88, 0xe3, 0x4a, 0xd0, 0x1a, 0xcc, 0x8a, 0xc1, 0x6d, 0xdd, 0x56, 0x76, 0x49, 0xb3, 0x6f,
-	0xb7, 0xa8, 0x66, 0x97, 0x67, 0x78, 0x7c, 0xe7, 0x1f, 0xf7, 0x16, 0x13, 0xe8, 0x38, 0x51, 0x0a,
-	0xbd, 0x0b, 0x53, 0xbb, 0x86, 0xb5, 0xa3, 0xb6, 0xdb, 0x44, 0x77, 0x91, 0x66, 0x39, 0xd2, 0x2c,
-	0xdb, 0x87, 0x95, 0x08, 0x0d, 0xc7, 0xb8, 0x91, 0x0d, 0x17, 0x05, 0x72, 0xc3, 0x32, 0x5a, 0xeb,
-	0x46, 0x4f, 0xa7, 0x4e, 0xd9, 0x77, 0xd1, 0x4b, 0xa3, 0x17, 0x17, 0x93, 0x18, 0x5e, 0x1c, 0x55,
-	0xae, 0x26, 0x57, 0xf9, 0x3e, 0x13, 0x4e, 0xc6, 0x46, 0x26, 0x8c, 0x89, 0x3e, 0xf1, 0x25, 0x4d,
-	0xb1, 0xed, 0x72, 0x99, 0x1f, 0xfd, 0x07, 0x83, 0x03, 0x9e, 0x27, 0x12, 0x3d, 0xff, 0x53, 0xc7,
-	0x47, 0x95, 0xb1, 0x20, 0x03, 0x0e, 0x69, 0xe0, 0x7d, 0x41, 0xe2, 0x6b, 0xd4, 0xf9, 0xf4, 0x56,
-	0x9f, 0xae, 0x2f, 0xc8, 0x37, 0xed, 0xa5, 0xf5, 0x05, 0x05, 0x20, 0x4f, 0x7e, 0x97, 0xfe, 0xcf,
-	0x1c, 0xcc, 0xf8, 0xcc, 0x99, 0xfb, 0x82, 0x12, 0x44, 0xfe, 0xbf, 0xbf, 0x3a, 0x5b, 0xaf, 0x8e,
-	0xbf, 0x74, 0xff, 0xf7, 0x7a, 0x75, 0x7c, 0xdb, 0x52, 0x6e, 0x0f, 0x7f, 0x9b, 0x0b, 0x4e, 0xe0,
-	0x94, 0x0d, 0x23, 0x2f, 0xa1, 0xc5, 0xf8, 0x27, 0xd7, 0x73, 0x22, 0x7f, 0x93, 0x87, 0xa9, 0xe8,
-	0x69, 0x0c, 0xf5, 0x15, 0x48, 0x03, 0xfb, 0x0a, 0x1a, 0x30, 0xbb, 0xdb, 0xd3, 0xb4, 0x3e, 0x9f,
-	0x43, 0xa0, 0xb9, 0xc0, 0xf9, 0x2e, 0xf8, 0x9a, 0x90, 0x9c, 0x5d, 0x49, 0xe0, 0xc1, 0x89, 0x92,
-	0xf1, 0x36, 0x83, 0xc2, 0x8f, 0x6d, 0x33, 0x18, 0x3a, 0x43, 0x9b, 0x41, 0x72, 0xa7, 0x46, 0xfe,
-	0x4c, 0x9d, 0x1a, 0x67, 0xe9, 0x31, 0x48, 0x08, 0x62, 0x03, 0xfb, 0x65, 0x5f, 0x83, 0x4b, 0x42,
-	0x8c, 0xf2, 0xde, 0x01, 0x9d, 0x5a, 0x86, 0xa6, 0x11, 0x6b, 0xb9, 0xd7, 0xed, 0xf6, 0xe5, 0x5f,
-	0xc2, 0x44, 0xb8, 0x2b, 0xc6, 0xd9, 0x69, 0xa7, 0x31, 0x47, 0x7c, 0x9d, 0x0d, 0xec, 0xb4, 0x33,
-	0x8e, 0x3d, 0x0e, 0xf9, 0xf7, 0x25, 0x98, 0x4b, 0xee, 0x7e, 0x45, 0x1a, 0x4c, 0x74, 0x95, 0xc3,
-	0x60, 0x47, 0xb2, 0x74, 0xc6, 0x77, 0x33, 0xde, 0x0e, 0xb1, 0x1e, 0xc2, 0xc2, 0x11, 0x6c, 0xf9,
-	0x07, 0x09, 0xe6, 0x53, 0x1a, 0x11, 0xce, 0xd7, 0x12, 0xf4, 0x21, 0x94, 0xba, 0xca, 0x61, 0xb3,
-	0x67, 0x75, 0xc8, 0x99, 0x5f, 0x0a, 0xf9, 0x71, 0x5f, 0x17, 0x28, 0xd8, 0xc3, 0x93, 0xff, 0x52,
-	0x82, 0x9f, 0xa5, 0x5e, 0xa4, 0xd0, 0xbd, 0x50, 0xcf, 0x84, 0x1c, 0xe9, 0x99, 0x40, 0x71, 0xc1,
-	0x57, 0xd4, 0x32, 0xf1, 0x85, 0x04, 0xe5, 0xb4, 0x9b, 0x25, 0xba, 0x1b, 0x32, 0xf2, 0xe7, 0x11,
-	0x23, 0xa7, 0x63, 0x72, 0xaf, 0xc8, 0xc6, 0x7f, 0x95, 0xe0, 0xf2, 0x09, 0x15, 0x9a, 0x77, 0x81,
-	0x21, 0xed, 0x20, 0x17, 0x7f, 0xd4, 0x16, 0x5f, 0xc4, 0xfc, 0x0b, 0x4c, 0x02, 0x0f, 0x4e, 0x95,
-	0x46, 0xdb, 0x30, 0x2f, 0x6e, 0x4f, 0x51, 0x9a, 0x28, 0x3e, 0x78, 0x6b, 0xd9, 0x72, 0x32, 0x0b,
-	0x4e, 0x93, 0x95, 0xff, 0x46, 0x82, 0xb9, 0xe4, 0x27, 0x03, 0xf4, 0x76, 0x68, 0xc9, 0x2b, 0x91,
-	0x25, 0x9f, 0x8c, 0x48, 0x89, 0x05, 0xff, 0x18, 0x26, 0xc4, 0xc3, 0x82, 0x80, 0x11, 0xce, 0x2c,
-	0x27, 0xe5, 0x17, 0x01, 0xe1, 0x96, 0xb7, 0xfc, 0x98, 0x84, 0xc7, 0x70, 0x04, 0x4d, 0xfe, 0x83,
-	0x1c, 0x0c, 0x35, 0x5b, 0x8a, 0x46, 0xce, 0xa1, 0xba, 0x7d, 0x2f, 0x54, 0xdd, 0x0e, 0xfa, 0xa7,
-	0x2d, 0x6e, 0x55, 0x6a, 0x61, 0x8b, 0x23, 0x85, 0xed, 0x9b, 0x99, 0xd0, 0x4e, 0xae, 0x69, 0x7f,
-	0x0d, 0x46, 0x3c, 0xa5, 0xa7, 0x4b, 0xb5, 0xf2, 0x5f, 0xe4, 0x60, 0x34, 0xa0, 0xe2, 0x94, 0x89,
-	0x7a, 0x37, 0x54, 0x9d, 0xe4, 0x33, 0x3c, 0xe3, 0x04, 0x74, 0x55, 0xdd, 0x7a, 0xc4, 0x69, 0x3a,
-	0xf6, 0xdb, 0x4c, 0xe3, 0x65, 0xca, 0x2f, 0x61, 0x82, 0x2a, 0x56, 0x87, 0x50, 0xef, 0xb3, 0x46,
-	0x9e, 0xfb, 0xa2, 0xd7, 0xfd, 0xbe, 0x15, 0xa2, 0xe2, 0x08, 0xf7, 0xa5, 0x87, 0x30, 0x1e, 0x52,
-	0x76, 0xaa, 0x9e, 0xe1, 0xbf, 0x97, 0xe0, 0xe7, 0x03, 0x9f, 0x82, 0x50, 0x3d, 0x74, 0x48, 0xaa,
-	0x91, 0x43, 0xb2, 0x90, 0x0e, 0xf0, 0xea, 0x7a, 0xcf, 0xea, 0x37, 0x9f, 0x7f, 0xbf, 0x70, 0xe1,
-	0xdb, 0xef, 0x17, 0x2e, 0x7c, 0xf7, 0xfd, 0xc2, 0x85, 0xdf, 0x3d, 0x5e, 0x90, 0x9e, 0x1f, 0x2f,
-	0x48, 0xdf, 0x1e, 0x2f, 0x48, 0xdf, 0x1d, 0x2f, 0x48, 0xff, 0x7e, 0xbc, 0x20, 0xfd, 0xc9, 0x0f,
-	0x0b, 0x17, 0x3e, 0x2c, 0x0a, 0xb8, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xa0, 0x62, 0xda, 0xf9,
-	0x07, 0x3e, 0x00, 0x00,
+	// 3743 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0x4d, 0x6c, 0x1c, 0xc7,
+	0x72, 0xd6, 0xec, 0x2e, 0xb9, 0xcb, 0xe2, 0x7f, 0x93, 0x22, 0xf7, 0x49, 0x4f, 0x5c, 0xbd, 0x31,
+	0xa0, 0xc8, 0x8e, 0xb4, 0x6b, 0xc9, 0x92, 0x9e, 0x22, 0x21, 0xef, 0x99, 0x4b, 0x8a, 0x12, 0x5f,
+	0xf8, 0xb3, 0xee, 0x25, 0x65, 0xc3, 0x88, 0x1d, 0x0f, 0x77, 0x9b, 0xcb, 0x11, 0x67, 0x67, 0xc6,
+	0xd3, 0xb3, 0x34, 0x17, 0xc8, 0x21, 0x87, 0x20, 0x80, 0x81, 0x00, 0xc9, 0xc5, 0x49, 0x8e, 0x31,
+	0x02, 0xe4, 0x94, 0x20, 0xc7, 0xe4, 0x60, 0x18, 0x09, 0xe2, 0x00, 0x42, 0xe0, 0x04, 0xbe, 0xc5,
+	0x27, 0x22, 0xa6, 0x4f, 0x41, 0x4e, 0xb9, 0x05, 0x3a, 0x05, 0xdd, 0xd3, 0xf3, 0x3f, 0xc3, 0x1d,
+	0xd2, 0x12, 0x11, 0x03, 0xef, 0x24, 0x6e, 0x57, 0xd5, 0x57, 0xd5, 0xdd, 0xd5, 0x55, 0xd5, 0x3d,
+	0x25, 0x58, 0xd9, 0xbf, 0x4f, 0xab, 0xaa, 0x51, 0xdb, 0xef, 0xed, 0x10, 0x4b, 0x27, 0x36, 0xa1,
+	0xb5, 0x03, 0xa2, 0xb7, 0x0d, 0xab, 0x26, 0x08, 0x8a, 0xa9, 0xd6, 0xc8, 0xa1, 0x4d, 0x74, 0xaa,
+	0x1a, 0x3a, 0xad, 0x1d, 0xdc, 0xda, 0x21, 0xb6, 0x72, 0xab, 0xd6, 0x21, 0x3a, 0xb1, 0x14, 0x9b,
+	0xb4, 0xab, 0xa6, 0x65, 0xd8, 0x06, 0xba, 0xe2, 0xb0, 0x57, 0x15, 0x53, 0xad, 0xfa, 0xec, 0x55,
+	0xc1, 0x7e, 0xe9, 0x66, 0x47, 0xb5, 0xf7, 0x7a, 0x3b, 0xd5, 0x96, 0xd1, 0xad, 0x75, 0x8c, 0x8e,
+	0x51, 0xe3, 0x52, 0x3b, 0xbd, 0x5d, 0xfe, 0x8b, 0xff, 0xe0, 0x7f, 0x39, 0x68, 0x97, 0xe4, 0x80,
+	0xf2, 0x96, 0x61, 0x91, 0xda, 0x41, 0x4c, 0xe3, 0xa5, 0x3b, 0x3e, 0x4f, 0x57, 0x69, 0xed, 0xa9,
+	0x3a, 0xb1, 0xfa, 0x35, 0x73, 0xbf, 0xc3, 0x06, 0x68, 0xad, 0x4b, 0x6c, 0x25, 0x49, 0xaa, 0x96,
+	0x26, 0x65, 0xf5, 0x74, 0x5b, 0xed, 0x92, 0x98, 0xc0, 0xbd, 0x41, 0x02, 0xb4, 0xb5, 0x47, 0xba,
+	0x4a, 0x4c, 0xee, 0xad, 0x34, 0xb9, 0x9e, 0xad, 0x6a, 0x35, 0x55, 0xb7, 0xa9, 0x6d, 0x45, 0x85,
+	0xe4, 0x3b, 0x30, 0xb5, 0xa8, 0x69, 0xc6, 0x27, 0xa4, 0xbd, 0xd4, 0x5c, 0x5d, 0xb6, 0xd4, 0x03,
+	0x62, 0xa1, 0xab, 0x50, 0xd0, 0x95, 0x2e, 0x29, 0x4b, 0x57, 0xa5, 0xeb, 0x23, 0xf5, 0xb1, 0xe7,
+	0x47, 0x95, 0x0b, 0xc7, 0x47, 0x95, 0xc2, 0x86, 0xd2, 0x25, 0x98, 0x53, 0xe4, 0x87, 0x30, 0x2d,
+	0xa4, 0x56, 0x34, 0x72, 0xf8, 0xd4, 0xd0, 0x7a, 0x5d, 0x82, 0xae, 0xc1, 0x70, 0x9b, 0x03, 0x08,
+	0xc1, 0x09, 0x21, 0x38, 0xec, 0xc0, 0x62, 0x41, 0x95, 0x29, 0x4c, 0x0a, 0xe1, 0x27, 0x06, 0xb5,
+	0x1b, 0x8a, 0xbd, 0x87, 0x6e, 0x03, 0x98, 0x8a, 0xbd, 0xd7, 0xb0, 0xc8, 0xae, 0x7a, 0x28, 0xc4,
+	0x91, 0x10, 0x87, 0x86, 0x47, 0xc1, 0x01, 0x2e, 0x74, 0x03, 0x4a, 0x16, 0x51, 0xda, 0x9b, 0xba,
+	0xd6, 0x2f, 0xe7, 0xae, 0x4a, 0xd7, 0x4b, 0xf5, 0x29, 0x21, 0x51, 0xc2, 0x62, 0x1c, 0x7b, 0x1c,
+	0xf2, 0x67, 0x39, 0x18, 0x59, 0x56, 0x48, 0xd7, 0xd0, 0x9b, 0xc4, 0x46, 0x1f, 0x41, 0x89, 0x6d,
+	0x57, 0x5b, 0xb1, 0x15, 0xae, 0x6d, 0xf4, 0xf6, 0x9b, 0x55, 0xdf, 0x9d, 0xbc, 0xd5, 0xab, 0x9a,
+	0xfb, 0x1d, 0x36, 0x40, 0xab, 0x8c, 0xbb, 0x7a, 0x70, 0xab, 0xba, 0xb9, 0xf3, 0x8c, 0xb4, 0xec,
+	0x75, 0x62, 0x2b, 0xbe, 0x7d, 0xfe, 0x18, 0xf6, 0x50, 0xd1, 0x06, 0x14, 0xa8, 0x49, 0x5a, 0xdc,
+	0xb2, 0xd1, 0xdb, 0x37, 0xaa, 0x27, 0x3a, 0x6b, 0xd5, 0xb3, 0xac, 0x69, 0x92, 0x96, 0xbf, 0xe2,
+	0xec, 0x17, 0xe6, 0x38, 0xe8, 0x29, 0x0c, 0x53, 0x5b, 0xb1, 0x7b, 0xb4, 0x9c, 0xe7, 0x88, 0xd5,
+	0xcc, 0x88, 0x5c, 0xca, 0xdf, 0x0c, 0xe7, 0x37, 0x16, 0x68, 0xf2, 0x7f, 0xe5, 0x00, 0x79, 0xbc,
+	0x4b, 0x86, 0xde, 0x56, 0x6d, 0xd5, 0xd0, 0xd1, 0x03, 0x28, 0xd8, 0x7d, 0xd3, 0x75, 0x81, 0x6b,
+	0xae, 0x41, 0x5b, 0x7d, 0x93, 0xbc, 0x38, 0xaa, 0xcc, 0xc5, 0x25, 0x18, 0x05, 0x73, 0x19, 0xb4,
+	0xe6, 0x99, 0x9a, 0xe3, 0xd2, 0x77, 0xc2, 0xaa, 0x5f, 0x1c, 0x55, 0x12, 0x0e, 0x5b, 0xd5, 0x43,
+	0x0a, 0x1b, 0x88, 0x0e, 0x00, 0x69, 0x0a, 0xb5, 0xb7, 0x2c, 0x45, 0xa7, 0x8e, 0x26, 0xb5, 0x4b,
+	0xc4, 0x22, 0xbc, 0x91, 0x6d, 0xd3, 0x98, 0x44, 0xfd, 0x92, 0xb0, 0x02, 0xad, 0xc5, 0xd0, 0x70,
+	0x82, 0x06, 0xe6, 0xcd, 0x16, 0x51, 0xa8, 0xa1, 0x97, 0x0b, 0x61, 0x6f, 0xc6, 0x7c, 0x14, 0x0b,
+	0x2a, 0x7a, 0x1d, 0x8a, 0x5d, 0x42, 0xa9, 0xd2, 0x21, 0xe5, 0x21, 0xce, 0x38, 0x29, 0x18, 0x8b,
+	0xeb, 0xce, 0x30, 0x76, 0xe9, 0xf2, 0x17, 0x12, 0x8c, 0x7b, 0x2b, 0xb7, 0xa6, 0x52, 0x1b, 0xfd,
+	0x6e, 0xcc, 0x0f, 0xab, 0xd9, 0xa6, 0xc4, 0xa4, 0xb9, 0x17, 0x7a, 0x3e, 0xef, 0x8e, 0x04, 0x7c,
+	0x70, 0x1d, 0x86, 0x54, 0x9b, 0x74, 0xd9, 0x3e, 0xe4, 0xaf, 0x8f, 0xde, 0xbe, 0x9e, 0xd5, 0x65,
+	0xea, 0xe3, 0x02, 0x74, 0x68, 0x95, 0x89, 0x63, 0x07, 0x45, 0xfe, 0xb3, 0x42, 0xc0, 0x7c, 0xe6,
+	0x9a, 0xe8, 0x03, 0x28, 0x51, 0xa2, 0x91, 0x96, 0x6d, 0x58, 0xc2, 0xfc, 0xb7, 0x32, 0x9a, 0xaf,
+	0xec, 0x10, 0xad, 0x29, 0x44, 0xeb, 0x63, 0xcc, 0x7e, 0xf7, 0x17, 0xf6, 0x20, 0xd1, 0x3b, 0x50,
+	0xb2, 0x49, 0xd7, 0xd4, 0x14, 0x9b, 0x88, 0x73, 0xf4, 0x5a, 0x70, 0x0a, 0xcc, 0x73, 0x18, 0x58,
+	0xc3, 0x68, 0x6f, 0x09, 0x36, 0x7e, 0x7c, 0xbc, 0x25, 0x71, 0x47, 0xb1, 0x07, 0x83, 0x0e, 0x60,
+	0xa2, 0x67, 0xb6, 0x19, 0xa7, 0xcd, 0xa2, 0x60, 0xa7, 0x2f, 0x3c, 0xe9, 0x5e, 0xd6, 0xb5, 0xd9,
+	0x0e, 0x49, 0xd7, 0xe7, 0x84, 0xae, 0x89, 0xf0, 0x38, 0x8e, 0x68, 0x41, 0x8b, 0x30, 0xd9, 0x55,
+	0x75, 0x16, 0x97, 0xfa, 0x4d, 0xd2, 0x32, 0xf4, 0x36, 0xe5, 0x6e, 0x35, 0x54, 0x9f, 0x17, 0x00,
+	0x93, 0xeb, 0x61, 0x32, 0x8e, 0xf2, 0xa3, 0x5f, 0x01, 0x72, 0xa7, 0xf1, 0xd8, 0x09, 0xe2, 0xaa,
+	0xa1, 0x73, 0x9f, 0xcb, 0xfb, 0xce, 0xbd, 0x15, 0xe3, 0xc0, 0x09, 0x52, 0x68, 0x0d, 0x66, 0x2d,
+	0x72, 0xa0, 0xb2, 0x39, 0x3e, 0x51, 0xa9, 0x6d, 0x58, 0xfd, 0x35, 0xb5, 0xab, 0xda, 0xe5, 0x61,
+	0x6e, 0x53, 0xf9, 0xf8, 0xa8, 0x32, 0x8b, 0x13, 0xe8, 0x38, 0x51, 0x4a, 0xfe, 0xf3, 0x61, 0x98,
+	0x8c, 0xc4, 0x1b, 0xf4, 0x14, 0xe6, 0x5a, 0x3d, 0xcb, 0x22, 0xba, 0xbd, 0xd1, 0xeb, 0xee, 0x10,
+	0xab, 0xd9, 0xda, 0x23, 0xed, 0x9e, 0x46, 0xda, 0xdc, 0x51, 0x86, 0xea, 0x0b, 0xc2, 0xe2, 0xb9,
+	0xa5, 0x44, 0x2e, 0x9c, 0x22, 0xcd, 0x56, 0x41, 0xe7, 0x43, 0xeb, 0x2a, 0xa5, 0x1e, 0x66, 0x8e,
+	0x63, 0x7a, 0xab, 0xb0, 0x11, 0xe3, 0xc0, 0x09, 0x52, 0xcc, 0xc6, 0x36, 0xa1, 0xaa, 0x45, 0xda,
+	0x51, 0x1b, 0xf3, 0x61, 0x1b, 0x97, 0x13, 0xb9, 0x70, 0x8a, 0x34, 0xba, 0x0b, 0xa3, 0x8e, 0x36,
+	0xbe, 0x7f, 0x62, 0xa3, 0x67, 0x04, 0xd8, 0xe8, 0x86, 0x4f, 0xc2, 0x41, 0x3e, 0x36, 0x35, 0x63,
+	0x87, 0x12, 0xeb, 0x80, 0xb4, 0xd3, 0x37, 0x78, 0x33, 0xc6, 0x81, 0x13, 0xa4, 0xd8, 0xd4, 0x1c,
+	0x0f, 0x8c, 0x4d, 0x6d, 0x38, 0x3c, 0xb5, 0xed, 0x44, 0x2e, 0x9c, 0x22, 0xcd, 0xfc, 0xd8, 0x31,
+	0x79, 0xf1, 0x40, 0x51, 0x35, 0x65, 0x47, 0x23, 0xe5, 0x62, 0xd8, 0x8f, 0x37, 0xc2, 0x64, 0x1c,
+	0xe5, 0x47, 0x8f, 0x61, 0xda, 0x19, 0xda, 0xd6, 0x15, 0x0f, 0xa4, 0xc4, 0x41, 0x7e, 0x22, 0x40,
+	0xa6, 0x37, 0xa2, 0x0c, 0x38, 0x2e, 0x83, 0x1e, 0xc0, 0x44, 0xcb, 0xd0, 0x34, 0xee, 0x8f, 0x4b,
+	0x46, 0x4f, 0xb7, 0xcb, 0x23, 0x1c, 0x05, 0xb1, 0xf3, 0xb8, 0x14, 0xa2, 0xe0, 0x08, 0x27, 0x22,
+	0x00, 0x2d, 0x37, 0xe1, 0xd0, 0x32, 0xf0, 0xf8, 0x78, 0x2b, 0x6b, 0x0c, 0xf0, 0x52, 0x95, 0x5f,
+	0x03, 0x78, 0x43, 0x14, 0x07, 0x80, 0xe5, 0x7f, 0x95, 0x60, 0x3e, 0x25, 0x74, 0xa0, 0x5f, 0x86,
+	0x52, 0xec, 0x6f, 0x46, 0x52, 0xec, 0xe5, 0x14, 0xb1, 0x40, 0x9e, 0xd5, 0x61, 0xdc, 0x62, 0xb3,
+	0xd2, 0x3b, 0x0e, 0x8b, 0x88, 0x91, 0x77, 0x07, 0x4c, 0x03, 0x07, 0x65, 0xfc, 0x98, 0x3f, 0x7d,
+	0x7c, 0x54, 0x19, 0x0f, 0xd1, 0x70, 0x18, 0x5e, 0xfe, 0x8b, 0x1c, 0xc0, 0x32, 0x31, 0x35, 0xa3,
+	0xdf, 0x25, 0xfa, 0x79, 0xd4, 0x50, 0x9b, 0xa1, 0x1a, 0xea, 0xe6, 0xa0, 0xed, 0xf1, 0x4c, 0x4b,
+	0x2d, 0xa2, 0xde, 0x8d, 0x14, 0x51, 0xb5, 0xec, 0x90, 0x27, 0x57, 0x51, 0xff, 0x91, 0x87, 0x19,
+	0x9f, 0xd9, 0x2f, 0xa3, 0x1e, 0x86, 0xf6, 0xf8, 0x37, 0x22, 0x7b, 0x3c, 0x9f, 0x20, 0xf2, 0xca,
+	0xea, 0xa8, 0x67, 0x30, 0xc1, 0xaa, 0x1c, 0x67, 0x2f, 0x79, 0x0d, 0x35, 0x7c, 0xea, 0x1a, 0xca,
+	0xcb, 0x76, 0x6b, 0x21, 0x24, 0x1c, 0x41, 0x4e, 0xa9, 0xd9, 0x8a, 0x3f, 0xc6, 0x9a, 0xed, 0x4b,
+	0x09, 0x26, 0xfc, 0x6d, 0x3a, 0x87, 0xa2, 0x6d, 0x23, 0x5c, 0xb4, 0xbd, 0x9e, 0xd9, 0x45, 0x53,
+	0xaa, 0xb6, 0xff, 0x65, 0x05, 0xbe, 0xc7, 0xc4, 0x0e, 0xf8, 0x8e, 0xd2, 0xda, 0x1f, 0x7c, 0xc7,
+	0x43, 0x9f, 0x49, 0x80, 0x44, 0x16, 0x58, 0xd4, 0x75, 0xc3, 0x56, 0x9c, 0x58, 0xe9, 0x98, 0xb5,
+	0x9a, 0xd9, 0x2c, 0x57, 0x63, 0x75, 0x3b, 0x86, 0xf5, 0x48, 0xb7, 0xad, 0xbe, 0xbf, 0xc9, 0x71,
+	0x06, 0x9c, 0x60, 0x00, 0x52, 0x00, 0x2c, 0x81, 0xb9, 0x65, 0x88, 0x83, 0x7c, 0x33, 0x43, 0xcc,
+	0x63, 0x02, 0x4b, 0x86, 0xbe, 0xab, 0x76, 0xfc, 0xb0, 0x83, 0x3d, 0x20, 0x1c, 0x00, 0xbd, 0xf4,
+	0x08, 0xe6, 0x53, 0xac, 0x45, 0x53, 0x90, 0xdf, 0x27, 0x7d, 0x67, 0xd9, 0x30, 0xfb, 0x13, 0xcd,
+	0xc2, 0xd0, 0x81, 0xa2, 0xf5, 0x9c, 0xf0, 0x3b, 0x82, 0x9d, 0x1f, 0x0f, 0x72, 0xf7, 0x25, 0xf9,
+	0x8b, 0xa1, 0xa0, 0xef, 0xf0, 0x8a, 0xf9, 0x3a, 0xbb, 0xb4, 0x9a, 0x9a, 0xda, 0x52, 0xa8, 0x28,
+	0x84, 0xc6, 0x9c, 0x0b, 0xab, 0x33, 0x86, 0x3d, 0x6a, 0xa8, 0xb6, 0xce, 0xbd, 0xda, 0xda, 0x3a,
+	0xff, 0x72, 0x6a, 0xeb, 0xdf, 0x83, 0x12, 0x75, 0xab, 0xea, 0x02, 0x87, 0xbc, 0x75, 0x8a, 0xf8,
+	0x2a, 0x0a, 0x6a, 0x4f, 0x81, 0x57, 0x4a, 0x7b, 0xa0, 0x49, 0x45, 0xf4, 0xd0, 0x29, 0x8b, 0xe8,
+	0x97, 0x5a, 0xf8, 0xb2, 0x78, 0x63, 0x2a, 0x3d, 0x4a, 0xda, 0x3c, 0xb6, 0x95, 0xfc, 0x78, 0xd3,
+	0xe0, 0xa3, 0x58, 0x50, 0xd1, 0x07, 0x21, 0x97, 0x2d, 0x9d, 0xc5, 0x65, 0x27, 0xd2, 0xdd, 0x15,
+	0x6d, 0xc3, 0xbc, 0x69, 0x19, 0x1d, 0x8b, 0x50, 0xba, 0x4c, 0x94, 0xb6, 0xa6, 0xea, 0xc4, 0x5d,
+	0x1f, 0xa7, 0x22, 0xba, 0x7c, 0x7c, 0x54, 0x99, 0x6f, 0x24, 0xb3, 0xe0, 0x34, 0x59, 0xf9, 0x79,
+	0x01, 0xa6, 0xa2, 0x19, 0x30, 0xa5, 0x48, 0x95, 0xce, 0x54, 0xa4, 0xde, 0x08, 0x1c, 0x06, 0xa7,
+	0x82, 0x0f, 0xbc, 0xe0, 0xc4, 0x0e, 0xc4, 0x22, 0x4c, 0x8a, 0x68, 0xe0, 0x12, 0x45, 0x99, 0xee,
+	0xed, 0xfe, 0x76, 0x98, 0x8c, 0xa3, 0xfc, 0xe8, 0x21, 0x8c, 0x5b, 0xbc, 0xee, 0x76, 0x01, 0x9c,
+	0xda, 0xf5, 0xa2, 0x00, 0x18, 0xc7, 0x41, 0x22, 0x0e, 0xf3, 0xb2, 0xba, 0xd5, 0x2f, 0x47, 0x5d,
+	0x80, 0x42, 0xb8, 0x6e, 0x5d, 0x8c, 0x32, 0xe0, 0xb8, 0x0c, 0x5a, 0x87, 0x99, 0x9e, 0x1e, 0x87,
+	0x72, 0x5c, 0xf9, 0xb2, 0x80, 0x9a, 0xd9, 0x8e, 0xb3, 0xe0, 0x24, 0x39, 0xb4, 0x1b, 0x2a, 0x65,
+	0x87, 0x79, 0x78, 0xbe, 0x9d, 0xf9, 0xe0, 0x65, 0xae, 0x65, 0x13, 0xca, 0xed, 0x52, 0xd6, 0x72,
+	0x5b, 0xfe, 0x27, 0x29, 0x98, 0x84, 0xbc, 0x12, 0x78, 0xd0, 0x2b, 0x53, 0x4c, 0x22, 0x50, 0x1d,
+	0x19, 0xc9, 0xd5, 0xef, 0xbd, 0x53, 0x55, 0xbf, 0x7e, 0xf2, 0x1c, 0x5c, 0xfe, 0x7e, 0x2e, 0xc1,
+	0xdc, 0x4a, 0xf3, 0xb1, 0x65, 0xf4, 0x4c, 0xd7, 0x9c, 0x4d, 0xd3, 0x59, 0x9a, 0x9f, 0x43, 0xc1,
+	0xea, 0x69, 0xee, 0x3c, 0x5e, 0x73, 0xe7, 0x81, 0x7b, 0x1a, 0x9b, 0xc7, 0x4c, 0x44, 0xca, 0x99,
+	0x04, 0x13, 0x40, 0x1b, 0x30, 0x6c, 0x29, 0x7a, 0x87, 0xb8, 0x69, 0xf5, 0xda, 0x00, 0xeb, 0x57,
+	0x97, 0x31, 0x63, 0x0f, 0x14, 0x36, 0x5c, 0x1a, 0x0b, 0x14, 0xf9, 0x9f, 0x25, 0x98, 0x7c, 0xb2,
+	0xb5, 0xd5, 0x58, 0xd5, 0xf9, 0x89, 0xe6, 0x6f, 0xab, 0x57, 0xa1, 0x60, 0x2a, 0xf6, 0x5e, 0x34,
+	0xd3, 0x33, 0x1a, 0xe6, 0x14, 0x74, 0x07, 0x4a, 0xec, 0x5f, 0x66, 0x17, 0x3f, 0x52, 0x23, 0x3c,
+	0x10, 0x96, 0x1a, 0x62, 0xec, 0x45, 0xe0, 0x6f, 0xec, 0x71, 0xa2, 0xf7, 0xa0, 0xc8, 0xe2, 0x0f,
+	0xd1, 0xdb, 0x19, 0x0b, 0x74, 0x61, 0x54, 0xdd, 0x11, 0xf2, 0x6b, 0x2e, 0x31, 0x80, 0x5d, 0x38,
+	0x79, 0x1f, 0x66, 0x03, 0x93, 0x60, 0xab, 0xf8, 0x94, 0xe5, 0x54, 0xd4, 0x84, 0x21, 0xa6, 0x9d,
+	0x65, 0xce, 0x7c, 0x86, 0x27, 0xd0, 0xc8, 0x42, 0xf8, 0xf5, 0x11, 0xfb, 0x45, 0xb1, 0x83, 0x25,
+	0xaf, 0xc3, 0x38, 0x7f, 0x86, 0x36, 0x2c, 0x9b, 0x2f, 0x26, 0xba, 0x02, 0xf9, 0xae, 0xaa, 0x8b,
+	0xec, 0x3c, 0x2a, 0x64, 0xf2, 0x2c, 0xb3, 0xb0, 0x71, 0x4e, 0x56, 0x0e, 0x45, 0xbc, 0xf2, 0xc9,
+	0xca, 0x21, 0x66, 0xe3, 0xf2, 0x63, 0x28, 0x8a, 0x4d, 0x0a, 0x02, 0xe5, 0x4f, 0x06, 0xca, 0x27,
+	0x00, 0x6d, 0x42, 0x71, 0xb5, 0x51, 0xd7, 0x0c, 0xa7, 0x56, 0x6b, 0xa9, 0x6d, 0x2b, 0xba, 0x83,
+	0x4b, 0xab, 0xcb, 0x18, 0x73, 0x0a, 0x92, 0x61, 0x98, 0x1c, 0xb6, 0x88, 0x69, 0x73, 0x3f, 0x1a,
+	0xa9, 0x03, 0xf3, 0x8d, 0x47, 0x7c, 0x04, 0x0b, 0x8a, 0xfc, 0x27, 0x39, 0x28, 0x8a, 0xe5, 0x38,
+	0x87, 0xbb, 0xdb, 0x5a, 0xe8, 0xee, 0xf6, 0x46, 0x36, 0xd7, 0x48, 0xbd, 0xb8, 0x6d, 0x45, 0x2e,
+	0x6e, 0x37, 0x32, 0xe2, 0x9d, 0x7c, 0x6b, 0xfb, 0x34, 0x07, 0x13, 0x61, 0xa7, 0x44, 0x77, 0x61,
+	0x94, 0xa5, 0x29, 0xb5, 0x45, 0x36, 0xfc, 0xea, 0xd8, 0x7b, 0xba, 0x69, 0xfa, 0x24, 0x1c, 0xe4,
+	0x43, 0x1d, 0x4f, 0x8c, 0xf9, 0x91, 0x98, 0x74, 0xfa, 0x92, 0xf6, 0x6c, 0x55, 0xab, 0x3a, 0x1f,
+	0x64, 0xaa, 0xab, 0xba, 0xbd, 0x69, 0x35, 0x6d, 0x4b, 0xd5, 0x3b, 0x31, 0x45, 0xdc, 0x29, 0x83,
+	0xc8, 0xe8, 0x5d, 0x96, 0x32, 0xa9, 0xd1, 0xb3, 0x5a, 0x24, 0xa9, 0xf4, 0x75, 0xcb, 0x36, 0x76,
+	0x40, 0xdb, 0x6b, 0x46, 0x4b, 0xd1, 0x9c, 0xcd, 0xc1, 0x64, 0x97, 0x58, 0x44, 0x6f, 0x11, 0xb7,
+	0xdc, 0x74, 0x20, 0xb0, 0x07, 0x26, 0xff, 0x83, 0x04, 0xa3, 0x62, 0x2d, 0xce, 0xe1, 0x92, 0xf3,
+	0x3b, 0xe1, 0x4b, 0xce, 0xb5, 0x8c, 0x91, 0x23, 0xf9, 0x86, 0xf3, 0xd7, 0xbe, 0xe9, 0x2c, 0x56,
+	0xb0, 0xe3, 0xb2, 0x67, 0x50, 0x3b, 0x7a, 0x5c, 0xd8, 0x29, 0xc7, 0x9c, 0x82, 0x7a, 0x30, 0xa5,
+	0x46, 0x82, 0x8b, 0xd8, 0xb3, 0x5a, 0x36, 0x4b, 0x3c, 0xb1, 0x7a, 0x59, 0xc0, 0x4f, 0x45, 0x29,
+	0x38, 0xa6, 0x42, 0x26, 0x10, 0xe3, 0x42, 0xef, 0x40, 0x61, 0xcf, 0xb6, 0xcd, 0x84, 0xe7, 0xf3,
+	0x01, 0x21, 0xcd, 0x37, 0xa1, 0xc4, 0x67, 0xb7, 0xb5, 0xd5, 0xc0, 0x1c, 0x4a, 0xfe, 0xc7, 0x9c,
+	0xb7, 0x1e, 0xfc, 0xce, 0xf1, 0xb6, 0x37, 0xdb, 0x25, 0x4d, 0xa1, 0x94, 0x3b, 0xb6, 0x73, 0x3f,
+	0x9e, 0x0d, 0x18, 0xee, 0xd1, 0x70, 0x8c, 0x1b, 0x6d, 0xf9, 0xa1, 0x5e, 0x3a, 0x4b, 0xa8, 0x1f,
+	0x4d, 0x0a, 0xf3, 0xe8, 0x09, 0xe4, 0x6d, 0x2d, 0xeb, 0x3d, 0x57, 0x20, 0x6e, 0xad, 0x35, 0xfd,
+	0x58, 0xb9, 0xb5, 0xd6, 0xc4, 0x0c, 0x02, 0x6d, 0xc2, 0x10, 0x4b, 0xa7, 0x2c, 0x3a, 0xe4, 0xb3,
+	0x47, 0x1b, 0xb6, 0x82, 0xbe, 0x4b, 0xb1, 0x5f, 0x14, 0x3b, 0x38, 0xf2, 0xc7, 0x30, 0x1e, 0x0a,
+	0x21, 0xe8, 0x23, 0x18, 0xd3, 0x0c, 0xa5, 0x5d, 0x57, 0x34, 0x45, 0x6f, 0x11, 0xf7, 0x6b, 0xc7,
+	0xb5, 0xa4, 0xb3, 0xb7, 0x16, 0xe0, 0x13, 0x01, 0x68, 0x56, 0x28, 0x19, 0x0b, 0xd2, 0x70, 0x08,
+	0x51, 0x56, 0x00, 0xfc, 0x39, 0xa2, 0x0a, 0x0c, 0x31, 0x4f, 0x75, 0x52, 0xdd, 0x48, 0x7d, 0x84,
+	0x59, 0xc8, 0x1c, 0x98, 0x62, 0x67, 0x1c, 0xdd, 0x06, 0xa0, 0xa4, 0x65, 0x11, 0x9b, 0x6f, 0x67,
+	0x2e, 0xfc, 0xc5, 0xb4, 0xe9, 0x51, 0x70, 0x80, 0x4b, 0xfe, 0x17, 0x09, 0xc6, 0x37, 0x88, 0xfd,
+	0x89, 0x61, 0xed, 0x37, 0x0c, 0x4d, 0x6d, 0xf5, 0xcf, 0x21, 0x0f, 0xe0, 0x50, 0x1e, 0x78, 0x73,
+	0xc0, 0xce, 0x84, 0xac, 0x4b, 0xcb, 0x06, 0xf2, 0x97, 0x12, 0xcc, 0x87, 0x38, 0x1f, 0xf9, 0x87,
+	0x7f, 0x1b, 0x86, 0x4c, 0xc3, 0xb2, 0xdd, 0x1a, 0xe1, 0x54, 0x0a, 0x59, 0x84, 0x0d, 0x54, 0x09,
+	0x0c, 0x06, 0x3b, 0x68, 0x68, 0x0d, 0x72, 0xb6, 0x21, 0x5c, 0xf5, 0x74, 0x98, 0x84, 0x58, 0x75,
+	0x10, 0x98, 0xb9, 0x2d, 0x03, 0xe7, 0x6c, 0x83, 0x6d, 0x44, 0x39, 0xc4, 0x15, 0x0c, 0x5f, 0xaf,
+	0x68, 0x06, 0x18, 0x0a, 0xbb, 0x96, 0xd1, 0x3d, 0xf3, 0x1c, 0xbc, 0x8d, 0x58, 0xb1, 0x8c, 0x2e,
+	0xe6, 0x58, 0xf2, 0x57, 0x12, 0x4c, 0x87, 0x38, 0xcf, 0x21, 0x75, 0xbc, 0x13, 0x4e, 0x1d, 0x37,
+	0x4e, 0x33, 0x91, 0x94, 0x04, 0xf2, 0x55, 0x2e, 0x32, 0x0d, 0x36, 0x61, 0xb4, 0x0b, 0xa3, 0xa6,
+	0xd1, 0x6e, 0xbe, 0x84, 0xef, 0x9b, 0x93, 0x2c, 0xa5, 0x37, 0x7c, 0x2c, 0x1c, 0x04, 0x46, 0x87,
+	0x30, 0xad, 0x2b, 0x5d, 0x42, 0x4d, 0xa5, 0x45, 0x9a, 0x2f, 0xe1, 0xc5, 0xe7, 0x22, 0xff, 0x80,
+	0x12, 0x45, 0xc4, 0x71, 0x25, 0x68, 0x1d, 0x8a, 0xaa, 0xc9, 0x4b, 0x4c, 0x51, 0x4b, 0x0c, 0xcc,
+	0xc3, 0x4e, 0x41, 0xea, 0xc4, 0x73, 0xf1, 0x03, 0xbb, 0x18, 0xf2, 0xdf, 0x44, 0xbd, 0x81, 0x57,
+	0x2c, 0x8f, 0xa1, 0xc4, 0x3b, 0x4d, 0x5a, 0x86, 0xe6, 0x7e, 0xea, 0xe0, 0x97, 0x0b, 0x31, 0xf6,
+	0xe2, 0xa8, 0x72, 0x39, 0xe1, 0x15, 0xdb, 0x25, 0x63, 0x4f, 0x18, 0x6d, 0x40, 0xc1, 0xfc, 0x21,
+	0xc5, 0x15, 0x4f, 0x93, 0xbc, 0xa2, 0xe2, 0x38, 0xf2, 0x1f, 0xe6, 0x23, 0xe6, 0xf2, 0x64, 0xf9,
+	0xec, 0xa5, 0xed, 0xba, 0x57, 0xcc, 0xa5, 0xee, 0xfc, 0x0e, 0x14, 0x45, 0xaa, 0x15, 0xce, 0xfc,
+	0xf3, 0xd3, 0x38, 0x73, 0x30, 0x8b, 0x79, 0x77, 0x29, 0x77, 0xd0, 0x05, 0x46, 0x1f, 0xc2, 0x30,
+	0x71, 0x54, 0x38, 0xb9, 0xf1, 0xde, 0x69, 0x54, 0xf8, 0x71, 0xd5, 0xaf, 0xa1, 0xc5, 0x98, 0x40,
+	0x45, 0xbf, 0x64, 0xeb, 0xc5, 0x78, 0x59, 0xc9, 0x49, 0xcb, 0x05, 0x9e, 0xae, 0xae, 0x38, 0xd3,
+	0xf6, 0x86, 0x5f, 0x1c, 0x55, 0xc0, 0xff, 0x89, 0x83, 0x12, 0xf2, 0xbf, 0x49, 0x30, 0xcd, 0x57,
+	0xa8, 0xd5, 0xb3, 0x54, 0xbb, 0x7f, 0x6e, 0x89, 0xe9, 0x69, 0x28, 0x31, 0xdd, 0x19, 0xb0, 0x2c,
+	0x31, 0x0b, 0x53, 0x93, 0xd3, 0xd7, 0x12, 0x5c, 0x8c, 0x71, 0x9f, 0x43, 0x5c, 0xdc, 0x0e, 0xc7,
+	0xc5, 0x37, 0x4f, 0x3b, 0xa1, 0x94, 0xd8, 0xf8, 0x3f, 0xd3, 0x09, 0xd3, 0xe1, 0x27, 0xe5, 0x36,
+	0x80, 0x69, 0xa9, 0x07, 0xaa, 0x46, 0x3a, 0xe2, 0xab, 0x7e, 0x29, 0xd0, 0xb3, 0xe5, 0x51, 0x70,
+	0x80, 0x0b, 0x51, 0x98, 0x6b, 0x93, 0x5d, 0xa5, 0xa7, 0xd9, 0x8b, 0xed, 0xf6, 0x92, 0x62, 0x2a,
+	0x3b, 0xaa, 0xa6, 0xda, 0xaa, 0x78, 0xff, 0x18, 0xa9, 0x3f, 0x74, 0xbe, 0xb6, 0x27, 0x71, 0xbc,
+	0x38, 0xaa, 0x5c, 0x49, 0xfa, 0xdc, 0xe5, 0xb2, 0xf4, 0x71, 0x0a, 0x34, 0xea, 0x43, 0xd9, 0x22,
+	0x1f, 0xf7, 0x54, 0x8b, 0xb4, 0x97, 0x2d, 0xc3, 0x0c, 0xa9, 0xcd, 0x73, 0xb5, 0xbf, 0x7d, 0x7c,
+	0x54, 0x29, 0xe3, 0x14, 0x9e, 0xc1, 0x8a, 0x53, 0xe1, 0xd1, 0x33, 0x98, 0x51, 0x44, 0x77, 0x5d,
+	0x50, 0xab, 0x73, 0x4a, 0xee, 0x1f, 0x1f, 0x55, 0x66, 0x16, 0xe3, 0xe4, 0xc1, 0x0a, 0x93, 0x40,
+	0x51, 0x0d, 0x8a, 0x07, 0xbc, 0x11, 0x8f, 0x96, 0x87, 0x38, 0x3e, 0x4b, 0x04, 0x45, 0xa7, 0x37,
+	0x8f, 0x61, 0x0e, 0xaf, 0x34, 0xf9, 0xe9, 0x73, 0xb9, 0xd8, 0x5d, 0x97, 0xd5, 0x92, 0xe2, 0xc4,
+	0xf3, 0x27, 0xf0, 0x92, 0x1f, 0xb5, 0x9e, 0xf8, 0x24, 0x1c, 0xe4, 0x43, 0x1f, 0xc0, 0xc8, 0x9e,
+	0x78, 0x30, 0xa1, 0xe5, 0x62, 0xa6, 0x24, 0x1c, 0x7a, 0x60, 0xa9, 0x4f, 0x0b, 0x15, 0x23, 0xee,
+	0x30, 0xc5, 0x3e, 0x22, 0x7a, 0x1d, 0x8a, 0xfc, 0xc7, 0xea, 0x32, 0x7f, 0x5f, 0x2c, 0xf9, 0xb1,
+	0xed, 0x89, 0x33, 0x8c, 0x5d, 0xba, 0xcb, 0xba, 0xda, 0x58, 0xe2, 0xef, 0xdc, 0x11, 0xd6, 0xd5,
+	0xc6, 0x12, 0x76, 0xe9, 0xe8, 0x23, 0x28, 0x52, 0xb2, 0xa6, 0xea, 0xbd, 0xc3, 0x32, 0x64, 0xfa,
+	0x4a, 0xde, 0x7c, 0xc4, 0xb9, 0x23, 0x2f, 0x7d, 0xbe, 0x06, 0x41, 0xc7, 0x2e, 0x2c, 0xda, 0x83,
+	0x11, 0xab, 0xa7, 0x2f, 0xd2, 0x6d, 0x4a, 0xac, 0xf2, 0x28, 0xd7, 0x31, 0x28, 0x9c, 0x63, 0x97,
+	0x3f, 0xaa, 0xc5, 0x5b, 0x21, 0x8f, 0x03, 0xfb, 0xe0, 0x68, 0x0f, 0x80, 0xff, 0xe0, 0x8f, 0x8a,
+	0xe5, 0x39, 0xae, 0xea, 0x7e, 0x16, 0x55, 0x49, 0x6f, 0x97, 0xe2, 0xc3, 0x82, 0x47, 0xc6, 0x01,
+	0x6c, 0xf4, 0xc7, 0x12, 0x20, 0xda, 0x33, 0x4d, 0x8d, 0x74, 0x89, 0x6e, 0x2b, 0x1a, 0x1f, 0xa5,
+	0xe5, 0x31, 0xae, 0xf2, 0xed, 0x41, 0x2b, 0x18, 0x13, 0x8c, 0xaa, 0xf6, 0xbe, 0x17, 0xc4, 0x59,
+	0x71, 0x82, 0x5e, 0xb6, 0x89, 0xbb, 0x62, 0xd6, 0xe3, 0x99, 0x36, 0x31, 0xf9, 0xb9, 0xd6, 0xdf,
+	0x44, 0x41, 0xc7, 0x2e, 0x2c, 0x7a, 0x0a, 0x73, 0x6e, 0xc7, 0x28, 0x36, 0x0c, 0x7b, 0x45, 0xd5,
+	0x08, 0xed, 0x53, 0x9b, 0x74, 0xcb, 0x13, 0xdc, 0xc1, 0xbc, 0xb6, 0x19, 0x9c, 0xc8, 0x85, 0x53,
+	0xa4, 0x51, 0x17, 0x2a, 0x6e, 0x70, 0x62, 0x27, 0xd7, 0x8b, 0x8e, 0x8f, 0x68, 0x4b, 0xd1, 0x9c,
+	0x4f, 0x28, 0x93, 0x5c, 0xc1, 0x6b, 0xc7, 0x47, 0x95, 0xca, 0xf2, 0xc9, 0xac, 0x78, 0x10, 0x16,
+	0x7a, 0x0f, 0xca, 0x4a, 0x9a, 0x9e, 0x29, 0xae, 0xe7, 0xa7, 0x2c, 0xe2, 0xa5, 0x2a, 0x48, 0x95,
+	0x46, 0x36, 0x4c, 0x29, 0xe1, 0xde, 0x5d, 0x5a, 0x9e, 0xce, 0xf4, 0x1a, 0x1b, 0x69, 0xf9, 0xf5,
+	0x1f, 0x4e, 0x22, 0x04, 0x8a, 0x63, 0x1a, 0xd0, 0xef, 0x03, 0x52, 0xa2, 0xed, 0xc6, 0xb4, 0x8c,
+	0x32, 0x25, 0xba, 0x58, 0x9f, 0xb2, 0xef, 0x76, 0x31, 0x12, 0xc5, 0x09, 0x7a, 0x58, 0x81, 0xae,
+	0x44, 0x5a, 0xa4, 0x69, 0x79, 0x9e, 0x2b, 0xaf, 0x65, 0x53, 0xee, 0xc9, 0x05, 0xbe, 0x14, 0x45,
+	0x11, 0x71, 0x5c, 0x09, 0x5a, 0x83, 0x59, 0x31, 0xb8, 0xad, 0x53, 0x65, 0x97, 0x34, 0xfb, 0xb4,
+	0x65, 0x6b, 0xb4, 0x3c, 0xc3, 0xe3, 0x3b, 0xff, 0x5a, 0xb9, 0x98, 0x40, 0xc7, 0x89, 0x52, 0xe8,
+	0x6d, 0x98, 0xda, 0x35, 0xac, 0x1d, 0xb5, 0xdd, 0x26, 0xba, 0x8b, 0x34, 0xcb, 0x91, 0xf8, 0x3b,
+	0xd0, 0x4a, 0x84, 0x86, 0x63, 0xdc, 0x88, 0xc2, 0x45, 0x81, 0xdc, 0xb0, 0x8c, 0xd6, 0xba, 0xd1,
+	0xd3, 0x6d, 0xa7, 0xec, 0xbb, 0xe8, 0xa5, 0xd1, 0x8b, 0x8b, 0x49, 0x0c, 0x2f, 0x8e, 0x2a, 0x57,
+	0x93, 0xab, 0x7c, 0x9f, 0x09, 0x27, 0x63, 0x23, 0x13, 0xc6, 0x44, 0xe3, 0x3b, 0x7f, 0x90, 0x2a,
+	0x97, 0xf9, 0xd1, 0x7f, 0x30, 0x38, 0xe0, 0x79, 0x22, 0xd1, 0xf3, 0x3f, 0x75, 0x7c, 0x54, 0x19,
+	0x0b, 0x32, 0xe0, 0x90, 0x06, 0xde, 0xe8, 0x24, 0x3e, 0xaf, 0x9d, 0x4f, 0xb3, 0xf8, 0xe9, 0x1a,
+	0x9d, 0x7c, 0xd3, 0x5e, 0x5a, 0xa3, 0x53, 0x00, 0xf2, 0xe4, 0x27, 0xf3, 0xff, 0xce, 0xc1, 0x8c,
+	0xcf, 0x9c, 0xb9, 0xd1, 0x29, 0x41, 0xe4, 0xd7, 0x0d, 0xe3, 0xd9, 0x9a, 0x8f, 0xfc, 0xa5, 0xfb,
+	0xff, 0xd7, 0x7c, 0xe4, 0xdb, 0x96, 0x72, 0x7b, 0xf8, 0xbb, 0x5c, 0x70, 0x02, 0xa7, 0xec, 0x80,
+	0x79, 0x09, 0x3d, 0xd3, 0x3f, 0xba, 0x26, 0x1a, 0xf9, 0xeb, 0x3c, 0x4c, 0x45, 0x4f, 0x63, 0xa8,
+	0x51, 0x42, 0x1a, 0xd8, 0x28, 0xd1, 0x80, 0xd9, 0xdd, 0x9e, 0xa6, 0xf5, 0xf9, 0x1c, 0x02, 0xdd,
+	0x12, 0xce, 0x27, 0xcb, 0x9f, 0x0a, 0xc9, 0xd9, 0x95, 0x04, 0x1e, 0x9c, 0x28, 0x19, 0xef, 0x9b,
+	0x28, 0xfc, 0xd0, 0xbe, 0x89, 0xa1, 0x33, 0xf4, 0x4d, 0x24, 0xb7, 0x9e, 0xe4, 0xcf, 0xd4, 0x7a,
+	0x72, 0x96, 0xa6, 0x89, 0x84, 0x20, 0x36, 0xb0, 0x01, 0xf8, 0x17, 0x30, 0x11, 0x6e, 0xe4, 0x71,
+	0xf6, 0xd2, 0xe9, 0x25, 0x12, 0x9f, 0x86, 0x03, 0x7b, 0xe9, 0x8c, 0x63, 0x8f, 0x43, 0xfe, 0x23,
+	0x09, 0xe6, 0x92, 0x1b, 0x76, 0x91, 0x06, 0x13, 0x5d, 0xe5, 0x30, 0xd8, 0x44, 0x2d, 0x9d, 0xf1,
+	0x65, 0x8c, 0x77, 0x70, 0xac, 0x87, 0xb0, 0x70, 0x04, 0x5b, 0xfe, 0x5e, 0x82, 0xf9, 0x94, 0xde,
+	0x89, 0xf3, 0xb5, 0x04, 0xbd, 0x0f, 0xa5, 0xae, 0x72, 0xd8, 0xec, 0x59, 0x1d, 0x72, 0xe6, 0xb7,
+	0x40, 0x7e, 0xa0, 0xd7, 0x05, 0x0a, 0xf6, 0xf0, 0xe4, 0xbf, 0x92, 0xe0, 0x27, 0xa9, 0x57, 0x25,
+	0x74, 0x2f, 0xd4, 0xe6, 0x21, 0x47, 0xda, 0x3c, 0x50, 0x5c, 0xf0, 0x15, 0x75, 0x79, 0x7c, 0x2e,
+	0x41, 0x39, 0xed, 0xee, 0x88, 0xee, 0x86, 0x8c, 0xfc, 0x59, 0xc4, 0xc8, 0xe9, 0x98, 0xdc, 0x2b,
+	0xb2, 0xf1, 0xdf, 0x25, 0xb8, 0x7c, 0x42, 0x0d, 0xe6, 0x5d, 0x51, 0x48, 0x3b, 0xc8, 0xc5, 0x9f,
+	0xad, 0xc5, 0x37, 0x2f, 0xff, 0x8a, 0x92, 0xc0, 0x83, 0x53, 0xa5, 0xd1, 0x36, 0xcc, 0x8b, 0xfb,
+	0x51, 0x94, 0x26, 0xca, 0x0b, 0xde, 0x0d, 0xb7, 0x9c, 0xcc, 0x82, 0xd3, 0x64, 0xe5, 0xbf, 0x95,
+	0x60, 0x2e, 0xf9, 0x51, 0x00, 0xbd, 0x15, 0x5a, 0xf2, 0x4a, 0x64, 0xc9, 0x27, 0x23, 0x52, 0x62,
+	0xc1, 0x3f, 0x84, 0x09, 0xf1, 0x74, 0x20, 0x60, 0x84, 0x33, 0xcb, 0x49, 0x19, 0x44, 0x40, 0xb8,
+	0x05, 0x2c, 0x3f, 0x26, 0xe1, 0x31, 0x1c, 0x41, 0x93, 0x3f, 0xcd, 0xc1, 0x50, 0xb3, 0xa5, 0x68,
+	0xe4, 0x1c, 0xea, 0xd7, 0x5f, 0x85, 0xea, 0xd7, 0x41, 0xff, 0xcf, 0x8c, 0x5b, 0x95, 0x5a, 0xba,
+	0xe2, 0x48, 0xe9, 0xfa, 0x46, 0x26, 0xb4, 0x93, 0xab, 0xd6, 0xdf, 0x82, 0x11, 0x4f, 0xe9, 0xe9,
+	0x92, 0xa9, 0xfc, 0x97, 0x39, 0x18, 0x0d, 0xa8, 0x38, 0x65, 0x2a, 0xde, 0x0d, 0xd5, 0x1f, 0xf9,
+	0x0c, 0x0f, 0x35, 0x01, 0x5d, 0x55, 0xb7, 0xe2, 0x70, 0xfa, 0xa4, 0xfd, 0xce, 0xd8, 0x78, 0x21,
+	0xf2, 0x0b, 0x98, 0xb0, 0x15, 0xab, 0x43, 0x6c, 0xef, 0xc3, 0x85, 0xd3, 0xc7, 0xe5, 0x35, 0xec,
+	0x6f, 0x85, 0xa8, 0x38, 0xc2, 0x7d, 0xe9, 0x21, 0x8c, 0x87, 0x94, 0x9d, 0xaa, 0xcd, 0xf9, 0xef,
+	0x25, 0xf8, 0xd9, 0xc0, 0xc7, 0x1e, 0x54, 0x0f, 0x1d, 0x92, 0x6a, 0xe4, 0x90, 0x2c, 0xa4, 0x03,
+	0xbc, 0xba, 0x76, 0xb9, 0xfa, 0xcd, 0xe7, 0xdf, 0x2d, 0x5c, 0xf8, 0xe6, 0xbb, 0x85, 0x0b, 0xdf,
+	0x7e, 0xb7, 0x70, 0xe1, 0x0f, 0x8e, 0x17, 0xa4, 0xe7, 0xc7, 0x0b, 0xd2, 0x37, 0xc7, 0x0b, 0xd2,
+	0xb7, 0xc7, 0x0b, 0xd2, 0x7f, 0x1e, 0x2f, 0x48, 0x7f, 0xfa, 0xfd, 0xc2, 0x85, 0xf7, 0x8b, 0x02,
+	0xee, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x98, 0xf4, 0xad, 0x8a, 0xba, 0x3e, 0x00, 0x00,
 }
 
 func (m *AllowedCSIDriver) Marshal() (dAtA []byte, err error) {
@@ -2843,6 +2817,13 @@ func (m *HTTPIngressPath) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	_ = i
 	var l int
 	_ = l
+	if m.PathType != nil {
+		i -= len(*m.PathType)
+		copy(dAtA[i:], *m.PathType)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PathType)))
+		i--
+		dAtA[i] = 0x1a
+	}
 	{
 		size, err := m.Backend.MarshalToSizedBuffer(dAtA[:i])
 		if err != nil {
@@ -3066,6 +3047,18 @@ func (m *IngressBackend) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	_ = i
 	var l int
 	_ = l
+	if m.Resource != nil {
+		{
+			size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
 	{
 		size, err := m.ServicePort.MarshalToSizedBuffer(dAtA[:i])
 		if err != nil {
@@ -3224,6 +3217,13 @@ func (m *IngressSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	_ = i
 	var l int
 	_ = l
+	if m.IngressClassName != nil {
+		i -= len(*m.IngressClassName)
+		copy(dAtA[i:], *m.IngressClassName)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.IngressClassName)))
+		i--
+		dAtA[i] = 0x22
+	}
 	if len(m.Rules) > 0 {
 		for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- {
 			{
@@ -4332,29 +4332,6 @@ func (m *ReplicaSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	return len(dAtA) - i, nil
 }
 
-func (m *ReplicationControllerDummy) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *ReplicationControllerDummy) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ReplicationControllerDummy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	return len(dAtA) - i, nil
-}
-
 func (m *RollbackConfig) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
@@ -5133,6 +5110,10 @@ func (m *HTTPIngressPath) Size() (n int) {
 	n += 1 + l + sovGenerated(uint64(l))
 	l = m.Backend.Size()
 	n += 1 + l + sovGenerated(uint64(l))
+	if m.PathType != nil {
+		l = len(*m.PathType)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
 	return n
 }
 
@@ -5215,6 +5196,10 @@ func (m *IngressBackend) Size() (n int) {
 	n += 1 + l + sovGenerated(uint64(l))
 	l = m.ServicePort.Size()
 	n += 1 + l + sovGenerated(uint64(l))
+	if m.Resource != nil {
+		l = m.Resource.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
 	return n
 }
 
@@ -5283,6 +5268,10 @@ func (m *IngressSpec) Size() (n int) {
 			n += 1 + l + sovGenerated(uint64(l))
 		}
 	}
+	if m.IngressClassName != nil {
+		l = len(*m.IngressClassName)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
 	return n
 }
 
@@ -5675,15 +5664,6 @@ func (m *ReplicaSetStatus) Size() (n int) {
 	return n
 }
 
-func (m *ReplicationControllerDummy) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	return n
-}
-
 func (m *RollbackConfig) Size() (n int) {
 	if m == nil {
 		return 0
@@ -6122,6 +6102,7 @@ func (this *HTTPIngressPath) String() string {
 	s := strings.Join([]string{`&HTTPIngressPath{`,
 		`Path:` + fmt.Sprintf("%v", this.Path) + `,`,
 		`Backend:` + strings.Replace(strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1), `&`, ``, 1) + `,`,
+		`PathType:` + valueToStringGenerated(this.PathType) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -6193,6 +6174,7 @@ func (this *IngressBackend) String() string {
 	s := strings.Join([]string{`&IngressBackend{`,
 		`ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`,
 		`ServicePort:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ServicePort), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`,
+		`Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "TypedLocalObjectReference", "v11.TypedLocalObjectReference", 1) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -6252,6 +6234,7 @@ func (this *IngressSpec) String() string {
 		`Backend:` + strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1) + `,`,
 		`TLS:` + repeatedStringForTLS + `,`,
 		`Rules:` + repeatedStringForRules + `,`,
+		`IngressClassName:` + valueToStringGenerated(this.IngressClassName) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -6547,15 +6530,6 @@ func (this *ReplicaSetStatus) String() string {
 	}, "")
 	return s
 }
-func (this *ReplicationControllerDummy) String() string {
-	if this == nil {
-		return "nil"
-	}
-	s := strings.Join([]string{`&ReplicationControllerDummy{`,
-		`}`,
-	}, "")
-	return s
-}
 func (this *RollbackConfig) String() string {
 	if this == nil {
 		return "nil"
@@ -9672,6 +9646,39 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error {
 				return err
 			}
 			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PathType", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := PathType(dAtA[iNdEx:postIndex])
+			m.PathType = &s
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -10328,6 +10335,42 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error {
 				return err
 			}
 			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Resource == nil {
+				m.Resource = &v11.TypedLocalObjectReference{}
+			}
+			if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -10812,6 +10855,39 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error {
 				return err
 			}
 			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field IngressClassName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.IngressClassName = &s
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -13816,59 +13892,6 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error {
 	}
 	return nil
 }
-func (m *ReplicationControllerDummy) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowGenerated
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: ReplicationControllerDummy: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: ReplicationControllerDummy: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		default:
-			iNdEx = preIndex
-			skippy, err := skipGenerated(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthGenerated
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
 func (m *RollbackConfig) Unmarshal(dAtA []byte) error {
 	l := len(dAtA)
 	iNdEx := 0
@@ -15209,6 +15232,7 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error {
 func skipGenerated(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
+	depth := 0
 	for iNdEx < l {
 		var wire uint64
 		for shift := uint(0); ; shift += 7 {
@@ -15240,10 +15264,8 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			return iNdEx, nil
 		case 1:
 			iNdEx += 8
-			return iNdEx, nil
 		case 2:
 			var length int
 			for shift := uint(0); ; shift += 7 {
@@ -15264,55 +15286,30 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 				return 0, ErrInvalidLengthGenerated
 			}
 			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthGenerated
-			}
-			return iNdEx, nil
 		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowGenerated
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipGenerated(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthGenerated
-				}
-			}
-			return iNdEx, nil
+			depth++
 		case 4:
-			return iNdEx, nil
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
 		case 5:
 			iNdEx += 4
-			return iNdEx, nil
 		default:
 			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
 		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
 	}
-	panic("unreachable")
+	return 0, io.ErrUnexpectedEOF
 }
 
 var (
-	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
 )
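
The regenerated skipGenerated above (and the identical copies in the other vendored packages touched by this patch) replaces the per-wire-type early returns and recursive group handling with a single depth counter: a start-group tag (wire type 3) increments depth, an end-group tag (wire type 4) decrements it, and the function returns only once depth is back to zero, which is also what the new ErrUnexpectedEndOfGroupGenerated error guards against. Below is a minimal standalone sketch of that idea; it is not the vendored code, and readVarint plus the error values are illustrative helpers.

package main

import (
	"errors"
	"fmt"
	"io"
)

var (
	errInvalidLength        = errors.New("proto: negative length found during unmarshaling")
	errUnexpectedEndOfGroup = errors.New("proto: unexpected end of group")
)

// readVarint decodes one protobuf varint from b and returns its value and the
// number of bytes consumed.
func readVarint(b []byte) (uint64, int, error) {
	var v uint64
	for i, shift := 0, uint(0); i < len(b); i, shift = i+1, shift+7 {
		if shift >= 64 {
			return 0, 0, errors.New("proto: integer overflow")
		}
		v |= uint64(b[i]&0x7F) << shift
		if b[i] < 0x80 {
			return v, i + 1, nil
		}
	}
	return 0, 0, io.ErrUnexpectedEOF
}

// skipField skips exactly one top-level field, including any nested groups,
// and returns the number of bytes consumed. Groups are tracked with a depth
// counter instead of recursion, mirroring the regenerated skipGenerated.
func skipField(buf []byte) (int, error) {
	i, depth := 0, 0
	for i < len(buf) {
		key, n, err := readVarint(buf[i:])
		if err != nil {
			return 0, err
		}
		i += n
		switch key & 0x7 {
		case 0: // varint payload: skip its bytes
			_, m, err := readVarint(buf[i:])
			if err != nil {
				return 0, err
			}
			i += m
		case 1: // fixed64
			i += 8
		case 2: // length-delimited: skip the length prefix plus the payload
			l, m, err := readVarint(buf[i:])
			if err != nil {
				return 0, err
			}
			if int(l) < 0 {
				return 0, errInvalidLength
			}
			i += m + int(l)
		case 3: // start group: one level deeper
			depth++
		case 4: // end group: must close an open group
			if depth == 0 {
				return 0, errUnexpectedEndOfGroup
			}
			depth--
		case 5: // fixed32
			i += 4
		default:
			return 0, fmt.Errorf("proto: illegal wireType %d", key&0x7)
		}
		if i > len(buf) {
			return 0, io.ErrUnexpectedEOF
		}
		if depth == 0 {
			return i, nil // the whole field, and any groups it opened, is consumed
		}
	}
	return 0, io.ErrUnexpectedEOF
}

func main() {
	// Field 1, wire type 0 (varint), value 150: bytes 0x08 0x96 0x01.
	fmt.Println(skipField([]byte{0x08, 0x96, 0x01})) // 3 <nil>
}
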
diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.proto b/vendor/k8s.io/api/extensions/v1beta1/generated.proto
index 6c90cb3c..ef8367e0 100644
--- a/vendor/k8s.io/api/extensions/v1beta1/generated.proto
+++ b/vendor/k8s.io/api/extensions/v1beta1/generated.proto
@@ -408,19 +408,33 @@ message FSGroupStrategyOptions {
   repeated IDRange ranges = 2;
 }
 
-// HTTPIngressPath associates a path regex with a backend. Incoming urls matching
-// the path are forwarded to the backend.
+// HTTPIngressPath associates a path with a backend. Incoming urls matching the
+// path are forwarded to the backend.
 message HTTPIngressPath {
-  // Path is an extended POSIX regex as defined by IEEE Std 1003.1,
-  // (i.e this follows the egrep/unix syntax, not the perl syntax)
-  // matched against the path of an incoming request. Currently it can
-  // contain characters disallowed from the conventional "path"
-  // part of a URL as defined by RFC 3986. Paths must begin with
-  // a '/'. If unspecified, the path defaults to a catch all sending
-  // traffic to the backend.
+  // Path is matched against the path of an incoming request. Currently it can
+  // contain characters disallowed from the conventional "path" part of a URL
+  // as defined by RFC 3986. Paths must begin with a '/'. When unspecified,
+  // all paths from incoming requests are matched.
   // +optional
   optional string path = 1;
 
+  // PathType determines the interpretation of the Path matching. PathType can
+  // be one of the following values:
+  // * Exact: Matches the URL path exactly.
+  // * Prefix: Matches based on a URL path prefix split by '/'. Matching is
+  //   done on a path element by element basis. A path element refers to the
+  //   list of labels in the path split by the '/' separator. A request is a
+  //   match for path p if p is an element-wise prefix of the request path.
+  //   Note that if the last element of the path is a substring
+  //   of the last element in request path, it is not a match (e.g. /foo/bar
+  //   matches /foo/bar/baz, but does not match /foo/barbaz).
+  // * ImplementationSpecific: Interpretation of the Path matching is up to
+  //   the IngressClass. Implementations can treat this as a separate PathType
+  //   or treat it identically to Prefix or Exact path types.
+  // Implementations are required to support all path types.
+  // Defaults to ImplementationSpecific.
+  optional string pathType = 3;
+
   // Backend defines the referenced service endpoint to which the traffic
   // will be forwarded to.
   optional IngressBackend backend = 2;
@@ -458,16 +472,16 @@ message IDRange {
 }
 
 // DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock.
-// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24") that is allowed to the pods
-// matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should
-// not be included within this rule.
+// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24","2001:db9::/64") that is allowed
+// to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs
+// that should not be included within this rule.
 message IPBlock {
   // CIDR is a string representing the IP Block
-  // Valid examples are "192.168.1.1/24"
+  // Valid examples are "192.168.1.1/24" or "2001:db9::/64"
   optional string cidr = 1;
 
   // Except is a slice of CIDRs that should not be included within an IP Block
-  // Valid examples are "192.168.1.1/24"
+  // Valid examples are "192.168.1.1/24" or "2001:db9::/64"
   // Except values will be rejected if they are outside the CIDR range
   // +optional
   repeated string except = 2;
@@ -498,10 +512,18 @@ message Ingress {
 // IngressBackend describes all endpoints for a given service and port.
 message IngressBackend {
   // Specifies the name of the referenced service.
+  // +optional
   optional string serviceName = 1;
 
   // Specifies the port of the referenced service.
+  // +optional
   optional k8s.io.apimachinery.pkg.util.intstr.IntOrString servicePort = 2;
+
+  // Resource is an ObjectRef to another Kubernetes resource in the namespace
+  // of the Ingress object. If resource is specified, serviceName and servicePort
+  // must not be specified.
+  // +optional
+  optional k8s.io.api.core.v1.TypedLocalObjectReference resource = 3;
 }
 
 // IngressList is a collection of Ingress.
@@ -519,18 +541,28 @@ message IngressList {
 // the related backend services. Incoming requests are first evaluated for a host
 // match, then routed to the backend associated with the matching IngressRuleValue.
 message IngressRule {
-  // Host is the fully qualified domain name of a network host, as defined
-  // by RFC 3986. Note the following deviations from the "host" part of the
-  // URI as defined in the RFC:
-  // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the
-  // 	  IP in the Spec of the parent Ingress.
+  // Host is the fully qualified domain name of a network host, as defined by RFC 3986.
+  // Note the following deviations from the "host" part of the
+  // URI as defined in RFC 3986:
+  // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to
+  //    the IP in the Spec of the parent Ingress.
   // 2. The `:` delimiter is not respected because ports are not allowed.
   // 	  Currently the port of an Ingress is implicitly :80 for http and
   // 	  :443 for https.
   // Both these may change in the future.
-  // Incoming requests are matched against the host before the IngressRuleValue.
-  // If the host is unspecified, the Ingress routes all traffic based on the
-  // specified IngressRuleValue.
+  // Incoming requests are matched against the host before the
+  // IngressRuleValue. If the host is unspecified, the Ingress routes all
+  // traffic based on the specified IngressRuleValue.
+  //
+  // Host can be "precise" which is a domain name without the terminating dot of
+  // a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name
+  // prefixed with a single wildcard label (e.g. "*.foo.com").
+  // The wildcard character '*' must appear by itself as the first DNS label and
+  // matches only a single label. You cannot have a wildcard label by itself (e.g. Host == "*").
+  // Requests will be matched against the Host field in the following way:
+  // 1. If Host is precise, the request matches this rule if the http host header is equal to Host.
+  // 2. If Host is a wildcard, then the request matches this rule if the http host header
+  // is equal to the suffix (removing the first label) of the wildcard rule.
   // +optional
   optional string host = 1;
 
@@ -554,6 +586,19 @@ message IngressRuleValue {
 
 // IngressSpec describes the Ingress the user wishes to exist.
 message IngressSpec {
+  // IngressClassName is the name of the IngressClass cluster resource. The
+  // associated IngressClass defines which controller will implement the
+  // resource. This replaces the deprecated `kubernetes.io/ingress.class`
+  // annotation. For backwards compatibility, when that annotation is set, it
+  // must be given precedence over this field. The controller may emit a
+  // warning if the field and annotation have different values.
+  // Implementations of this API should ignore Ingresses without a class
+  // specified. An IngressClass resource may be marked as default, which can
+  // be used to set a default value for this field. For more information,
+  // refer to the IngressClass documentation.
+  // +optional
+  optional string ingressClassName = 4;
+
   // A default backend capable of servicing requests that don't match any
   // rule. At least one of 'backend' or 'rules' must be specified. This field
   // is optional to allow the loadbalancer controller or defaulting logic to
@@ -1025,10 +1070,6 @@ message ReplicaSetStatus {
   repeated ReplicaSetCondition conditions = 6;
 }
 
-// Dummy definition
-message ReplicationControllerDummy {
-}
-
 // DEPRECATED.
 message RollbackConfig {
   // The revision to rollback to. If set to 0, rollback to the last revision.
diff --git a/vendor/k8s.io/api/extensions/v1beta1/register.go b/vendor/k8s.io/api/extensions/v1beta1/register.go
index 7625f678..c69eff0b 100644
--- a/vendor/k8s.io/api/extensions/v1beta1/register.go
+++ b/vendor/k8s.io/api/extensions/v1beta1/register.go
@@ -47,7 +47,6 @@ func addKnownTypes(scheme *runtime.Scheme) error {
 		&Deployment{},
 		&DeploymentList{},
 		&DeploymentRollback{},
-		&ReplicationControllerDummy{},
 		&Scale{},
 		&DaemonSetList{},
 		&DaemonSet{},
diff --git a/vendor/k8s.io/api/extensions/v1beta1/types.go b/vendor/k8s.io/api/extensions/v1beta1/types.go
index eb255341..8934c061 100644
--- a/vendor/k8s.io/api/extensions/v1beta1/types.go
+++ b/vendor/k8s.io/api/extensions/v1beta1/types.go
@@ -67,13 +67,6 @@ type Scale struct {
 	Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
 }
 
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// Dummy definition
-type ReplicationControllerDummy struct {
-	metav1.TypeMeta `json:",inline"`
-}
-
 // +genclient
 // +genclient:method=GetScale,verb=get,subresource=scale,result=Scale
 // +genclient:method=UpdateScale,verb=update,subresource=scale,input=Scale,result=Scale
@@ -576,6 +569,19 @@ type IngressList struct {
 
 // IngressSpec describes the Ingress the user wishes to exist.
 type IngressSpec struct {
+	// IngressClassName is the name of the IngressClass cluster resource. The
+	// associated IngressClass defines which controller will implement the
+	// resource. This replaces the deprecated `kubernetes.io/ingress.class`
+	// annotation. For backwards compatibility, when that annotation is set, it
+	// must be given precedence over this field. The controller may emit a
+	// warning if the field and annotation have different values.
+	// Implementations of this API should ignore Ingresses without a class
+	// specified. An IngressClass resource may be marked as default, which can
+	// be used to set a default value for this field. For more information,
+	// refer to the IngressClass documentation.
+	// +optional
+	IngressClassName *string `json:"ingressClassName,omitempty" protobuf:"bytes,4,opt,name=ingressClassName"`
+
 	// A default backend capable of servicing requests that don't match any
 	// rule. At least one of 'backend' or 'rules' must be specified. This field
 	// is optional to allow the loadbalancer controller or defaulting logic to
@@ -627,18 +633,28 @@ type IngressStatus struct {
 // the related backend services. Incoming requests are first evaluated for a host
 // match, then routed to the backend associated with the matching IngressRuleValue.
 type IngressRule struct {
-	// Host is the fully qualified domain name of a network host, as defined
-	// by RFC 3986. Note the following deviations from the "host" part of the
-	// URI as defined in the RFC:
-	// 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the
-	//	  IP in the Spec of the parent Ingress.
+	// Host is the fully qualified domain name of a network host, as defined by RFC 3986.
+	// Note the following deviations from the "host" part of the
+	// URI as defined in RFC 3986:
+	// 1. IPs are not allowed. Currently an IngressRuleValue can only apply to
+	//    the IP in the Spec of the parent Ingress.
 	// 2. The `:` delimiter is not respected because ports are not allowed.
 	//	  Currently the port of an Ingress is implicitly :80 for http and
 	//	  :443 for https.
 	// Both these may change in the future.
-	// Incoming requests are matched against the host before the IngressRuleValue.
-	// If the host is unspecified, the Ingress routes all traffic based on the
-	// specified IngressRuleValue.
+	// Incoming requests are matched against the host before the
+	// IngressRuleValue. If the host is unspecified, the Ingress routes all
+	// traffic based on the specified IngressRuleValue.
+	//
+	// Host can be "precise" which is a domain name without the terminating dot of
+	// a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name
+	// prefixed with a single wildcard label (e.g. "*.foo.com").
+	// The wildcard character '*' must appear by itself as the first DNS label and
+	// matches only a single label. You cannot have a wildcard label by itself (e.g. Host == "*").
+	// Requests will be matched against the Host field in the following way:
+	// 1. If Host is precise, the request matches this rule if the http host header is equal to Host.
+	// 2. If Host is a wildcard, then the request matches this rule if the http host header
+	// is equal to the suffix (removing the first label) of the wildcard rule.
 	// +optional
 	Host string `json:"host,omitempty" protobuf:"bytes,1,opt,name=host"`
 	// IngressRuleValue represents a rule to route requests for this IngressRule.
@@ -677,19 +693,63 @@ type HTTPIngressRuleValue struct {
 	// options usable by a loadbalancer, like http keep-alive.
 }
 
-// HTTPIngressPath associates a path regex with a backend. Incoming urls matching
-// the path are forwarded to the backend.
+// PathType represents the type of path referred to by a HTTPIngressPath.
+type PathType string
+
+const (
+	// PathTypeExact matches the URL path exactly and with case sensitivity.
+	PathTypeExact = PathType("Exact")
+
+	// PathTypePrefix matches based on a URL path prefix split by '/'. Matching
+	// is case sensitive and done on a path element by element basis. A path
+	// element refers to the list of labels in the path split by the '/'
+	// separator. A request is a match for path p if p is an element-wise
+	// prefix of the request path. Note that if the last element of the
+	// path is a substring of the last element in request path, it is not a
+	// match (e.g. /foo/bar matches /foo/bar/baz, but does not match
+	// /foo/barbaz). If multiple matching paths exist in an Ingress spec, the
+	// longest matching path is given priority.
+	// Examples:
+	// - /foo/bar does not match requests to /foo/barbaz
+	// - /foo/bar matches requests to /foo/bar and /foo/bar/baz
+	// - /foo and /foo/ both match requests to /foo and /foo/. If both paths are
+	//   present in an Ingress spec, the longest matching path (/foo/) is given
+	//   priority.
+	PathTypePrefix = PathType("Prefix")
+
+	// PathTypeImplementationSpecific matching is up to the IngressClass.
+	// Implementations can treat this as a separate PathType or treat it
+	// identically to Prefix or Exact path types.
+	PathTypeImplementationSpecific = PathType("ImplementationSpecific")
+)
+
+// HTTPIngressPath associates a path with a backend. Incoming urls matching the
+// path are forwarded to the backend.
 type HTTPIngressPath struct {
-	// Path is an extended POSIX regex as defined by IEEE Std 1003.1,
-	// (i.e this follows the egrep/unix syntax, not the perl syntax)
-	// matched against the path of an incoming request. Currently it can
-	// contain characters disallowed from the conventional "path"
-	// part of a URL as defined by RFC 3986. Paths must begin with
-	// a '/'. If unspecified, the path defaults to a catch all sending
-	// traffic to the backend.
+	// Path is matched against the path of an incoming request. Currently it can
+	// contain characters disallowed from the conventional "path" part of a URL
+	// as defined by RFC 3986. Paths must begin with a '/'. When unspecified,
+	// all paths from incoming requests are matched.
 	// +optional
 	Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
 
+	// PathType determines the interpretation of the Path matching. PathType can
+	// be one of the following values:
+	// * Exact: Matches the URL path exactly.
+	// * Prefix: Matches based on a URL path prefix split by '/'. Matching is
+	//   done on a path element by element basis. A path element refers to the
+	//   list of labels in the path split by the '/' separator. A request is a
+	//   match for path p if p is an element-wise prefix of the request path.
+	//   Note that if the last element of the path is a substring
+	//   of the last element in request path, it is not a match (e.g. /foo/bar
+	//   matches /foo/bar/baz, but does not match /foo/barbaz).
+	// * ImplementationSpecific: Interpretation of the Path matching is up to
+	//   the IngressClass. Implementations can treat this as a separate PathType
+	//   or treat it identically to Prefix or Exact path types.
+	// Implementations are required to support all path types.
+	// Defaults to ImplementationSpecific.
+	PathType *PathType `json:"pathType,omitempty" protobuf:"bytes,3,opt,name=pathType"`
+
 	// Backend defines the referenced service endpoint to which the traffic
 	// will be forwarded to.
 	Backend IngressBackend `json:"backend" protobuf:"bytes,2,opt,name=backend"`
@@ -698,10 +758,18 @@ type HTTPIngressPath struct {
 // IngressBackend describes all endpoints for a given service and port.
 type IngressBackend struct {
 	// Specifies the name of the referenced service.
-	ServiceName string `json:"serviceName" protobuf:"bytes,1,opt,name=serviceName"`
+	// +optional
+	ServiceName string `json:"serviceName,omitempty" protobuf:"bytes,1,opt,name=serviceName"`
 
 	// Specifies the port of the referenced service.
-	ServicePort intstr.IntOrString `json:"servicePort" protobuf:"bytes,2,opt,name=servicePort"`
+	// +optional
+	ServicePort intstr.IntOrString `json:"servicePort,omitempty" protobuf:"bytes,2,opt,name=servicePort"`
+
+	// Resource is an ObjectRef to another Kubernetes resource in the namespace
+	// of the Ingress object. If resource is specified, serviceName and servicePort
+	// must not be specified.
+	// +optional
+	Resource *v1.TypedLocalObjectReference `json:"resource,omitempty" protobuf:"bytes,3,opt,name=resource"`
 }
 
 // +genclient
@@ -1341,15 +1409,15 @@ type NetworkPolicyPort struct {
 }
 
 // DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock.
-// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24") that is allowed to the pods
-// matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should
-// not be included within this rule.
+// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24","2001:db9::/64") that is allowed
+// to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs
+// that should not be included within this rule.
 type IPBlock struct {
 	// CIDR is a string representing the IP Block
-	// Valid examples are "192.168.1.1/24"
+	// Valid examples are "192.168.1.1/24" or "2001:db9::/64"
 	CIDR string `json:"cidr" protobuf:"bytes,1,name=cidr"`
 	// Except is a slice of CIDRs that should not be included within an IP Block
-	// Valid examples are "192.168.1.1/24"
+	// Valid examples are "192.168.1.1/24" or "2001:db9::/64"
 	// Except values will be rejected if they are outside the CIDR range
 	// +optional
 	Except []string `json:"except,omitempty" protobuf:"bytes,2,rep,name=except"`
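
As context for the regenerated marshaling code, here is a short sketch of how the new v1beta1 fields added in this patch might be populated: PathType on HTTPIngressPath, IngressClassName on IngressSpec, and the now-optional service fields on IngressBackend. It assumes the standard k8s.io/api and k8s.io/apimachinery import paths; the object, class, and service names are made up.

package main

import (
	"fmt"

	extv1beta1 "k8s.io/api/extensions/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	className := "nginx"                  // selects the implementing controller
	pathType := extv1beta1.PathTypePrefix // Exact, Prefix, or ImplementationSpecific

	ing := extv1beta1.Ingress{
		ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: "default"},
		Spec: extv1beta1.IngressSpec{
			// New optional field replacing the kubernetes.io/ingress.class annotation.
			IngressClassName: &className,
			Rules: []extv1beta1.IngressRule{{
				Host: "foo.bar.com",
				IngressRuleValue: extv1beta1.IngressRuleValue{
					HTTP: &extv1beta1.HTTPIngressRuleValue{
						Paths: []extv1beta1.HTTPIngressPath{{
							Path:     "/app",
							PathType: &pathType, // new optional field; defaults to ImplementationSpecific
							Backend: extv1beta1.IngressBackend{
								ServiceName: "app",
								ServicePort: intstr.FromInt(80),
							},
						}},
					},
				},
			}},
		},
	}
	fmt.Println(ing.Spec.Rules[0].HTTP.Paths[0].Path)
}
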
diff --git a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go
index a7eb2ec9..9ccad924 100644
--- a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go
@@ -230,9 +230,10 @@ func (FSGroupStrategyOptions) SwaggerDoc() map[string]string {
 }
 
 var map_HTTPIngressPath = map[string]string{
-	"":        "HTTPIngressPath associates a path regex with a backend. Incoming urls matching the path are forwarded to the backend.",
-	"path":    "Path is an extended POSIX regex as defined by IEEE Std 1003.1, (i.e this follows the egrep/unix syntax, not the perl syntax) matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/'. If unspecified, the path defaults to a catch all sending traffic to the backend.",
-	"backend": "Backend defines the referenced service endpoint to which the traffic will be forwarded to.",
+	"":         "HTTPIngressPath associates a path with a backend. Incoming urls matching the path are forwarded to the backend.",
+	"path":     "Path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/'. When unspecified, all paths from incoming requests are matched.",
+	"pathType": "PathType determines the interpretation of the Path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is\n  done on a path element by element basis. A path element refers is the\n  list of labels in the path split by the '/' separator. A request is a\n  match for path p if every p is an element-wise prefix of p of the\n  request path. Note that if the last element of the path is a substring\n  of the last element in request path, it is not a match (e.g. /foo/bar\n  matches /foo/bar/baz, but does not match /foo/barbaz).\n* ImplementationSpecific: Interpretation of the Path matching is up to\n  the IngressClass. Implementations can treat this as a separate PathType\n  or treat it identically to Prefix or Exact path types.\nImplementations are required to support all path types. Defaults to ImplementationSpecific.",
+	"backend":  "Backend defines the referenced service endpoint to which the traffic will be forwarded to.",
 }
 
 func (HTTPIngressPath) SwaggerDoc() map[string]string {
@@ -269,9 +270,9 @@ func (IDRange) SwaggerDoc() map[string]string {
 }
 
 var map_IPBlock = map[string]string{
-	"":       "DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock. IPBlock describes a particular CIDR (Ex. \"192.168.1.1/24\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.",
-	"cidr":   "CIDR is a string representing the IP Block Valid examples are \"192.168.1.1/24\"",
-	"except": "Except is a slice of CIDRs that should not be included within an IP Block Valid examples are \"192.168.1.1/24\" Except values will be rejected if they are outside the CIDR range",
+	"":       "DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock. IPBlock describes a particular CIDR (Ex. \"192.168.1.1/24\",\"2001:db9::/64\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.",
+	"cidr":   "CIDR is a string representing the IP Block Valid examples are \"192.168.1.1/24\" or \"2001:db9::/64\"",
+	"except": "Except is a slice of CIDRs that should not be included within an IP Block Valid examples are \"192.168.1.1/24\" or \"2001:db9::/64\" Except values will be rejected if they are outside the CIDR range",
 }
 
 func (IPBlock) SwaggerDoc() map[string]string {
@@ -293,6 +294,7 @@ var map_IngressBackend = map[string]string{
 	"":            "IngressBackend describes all endpoints for a given service and port.",
 	"serviceName": "Specifies the name of the referenced service.",
 	"servicePort": "Specifies the port of the referenced service.",
+	"resource":    "Resource is an ObjectRef to another Kubernetes resource in the namespace of the Ingress object. If resource is specified, serviceName and servicePort must not be specified.",
 }
 
 func (IngressBackend) SwaggerDoc() map[string]string {
@@ -311,7 +313,7 @@ func (IngressList) SwaggerDoc() map[string]string {
 
 var map_IngressRule = map[string]string{
 	"":     "IngressRule represents the rules mapping the paths under a specified host to the related backend services. Incoming requests are first evaluated for a host match, then routed to the backend associated with the matching IngressRuleValue.",
-	"host": "Host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in the RFC: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the\n\t  IP in the Spec of the parent Ingress.\n2. The `:` delimiter is not respected because ports are not allowed.\n\t  Currently the port of an Ingress is implicitly :80 for http and\n\t  :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.",
+	"host": "Host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to\n   the IP in the Spec of the parent Ingress.\n2. The `:` delimiter is not respected because ports are not allowed.\n\t  Currently the port of an Ingress is implicitly :80 for http and\n\t  :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.\n\nHost can be \"precise\" which is a domain name without the terminating dot of a network host (e.g. \"foo.bar.com\") or \"wildcard\", which is a domain name prefixed with a single wildcard label (e.g. \"*.foo.com\"). The wildcard character '*' must appear by itself as the first DNS label and matches only a single label. You cannot have a wildcard label by itself (e.g. Host == \"*\"). Requests will be matched against the Host field in the following way: 1. If Host is precise, the request matches this rule if the http host header is equal to Host. 2. If Host is a wildcard, then the request matches this rule if the http host header is to equal to the suffix (removing the first label) of the wildcard rule.",
 }
 
 func (IngressRule) SwaggerDoc() map[string]string {
@@ -327,10 +329,11 @@ func (IngressRuleValue) SwaggerDoc() map[string]string {
 }
 
 var map_IngressSpec = map[string]string{
-	"":        "IngressSpec describes the Ingress the user wishes to exist.",
-	"backend": "A default backend capable of servicing requests that don't match any rule. At least one of 'backend' or 'rules' must be specified. This field is optional to allow the loadbalancer controller or defaulting logic to specify a global default.",
-	"tls":     "TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.",
-	"rules":   "A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.",
+	"":                 "IngressSpec describes the Ingress the user wishes to exist.",
+	"ingressClassName": "IngressClassName is the name of the IngressClass cluster resource. The associated IngressClass defines which controller will implement the resource. This replaces the deprecated `kubernetes.io/ingress.class` annotation. For backwards compatibility, when that annotation is set, it must be given precedence over this field. The controller may emit a warning if the field and annotation have different values. Implementations of this API should ignore Ingresses without a class specified. An IngressClass resource may be marked as default, which can be used to set a default value for this field. For more information, refer to the IngressClass documentation.",
+	"backend":          "A default backend capable of servicing requests that don't match any rule. At least one of 'backend' or 'rules' must be specified. This field is optional to allow the loadbalancer controller or defaulting logic to specify a global default.",
+	"tls":              "TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.",
+	"rules":            "A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.",
 }
 
 func (IngressSpec) SwaggerDoc() map[string]string {
@@ -541,14 +544,6 @@ func (ReplicaSetStatus) SwaggerDoc() map[string]string {
 	return map_ReplicaSetStatus
 }
 
-var map_ReplicationControllerDummy = map[string]string{
-	"": "Dummy definition",
-}
-
-func (ReplicationControllerDummy) SwaggerDoc() map[string]string {
-	return map_ReplicationControllerDummy
-}
-
 var map_RollbackConfig = map[string]string{
 	"":         "DEPRECATED.",
 	"revision": "The revision to rollback to. If set to 0, rollback to the last revision.",
diff --git a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go
index cb610179..913f4851 100644
--- a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go
@@ -458,7 +458,12 @@ func (in *FSGroupStrategyOptions) DeepCopy() *FSGroupStrategyOptions {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *HTTPIngressPath) DeepCopyInto(out *HTTPIngressPath) {
 	*out = *in
-	out.Backend = in.Backend
+	if in.PathType != nil {
+		in, out := &in.PathType, &out.PathType
+		*out = new(PathType)
+		**out = **in
+	}
+	in.Backend.DeepCopyInto(&out.Backend)
 	return
 }
 
@@ -478,7 +483,9 @@ func (in *HTTPIngressRuleValue) DeepCopyInto(out *HTTPIngressRuleValue) {
 	if in.Paths != nil {
 		in, out := &in.Paths, &out.Paths
 		*out = make([]HTTPIngressPath, len(*in))
-		copy(*out, *in)
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
 	}
 	return
 }
@@ -578,6 +585,11 @@ func (in *Ingress) DeepCopyObject() runtime.Object {
 func (in *IngressBackend) DeepCopyInto(out *IngressBackend) {
 	*out = *in
 	out.ServicePort = in.ServicePort
+	if in.Resource != nil {
+		in, out := &in.Resource, &out.Resource
+		*out = new(corev1.TypedLocalObjectReference)
+		(*in).DeepCopyInto(*out)
+	}
 	return
 }
 
@@ -665,10 +677,15 @@ func (in *IngressRuleValue) DeepCopy() *IngressRuleValue {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *IngressSpec) DeepCopyInto(out *IngressSpec) {
 	*out = *in
+	if in.IngressClassName != nil {
+		in, out := &in.IngressClassName, &out.IngressClassName
+		*out = new(string)
+		**out = **in
+	}
 	if in.Backend != nil {
 		in, out := &in.Backend, &out.Backend
 		*out = new(IngressBackend)
-		**out = **in
+		(*in).DeepCopyInto(*out)
 	}
 	if in.TLS != nil {
 		in, out := &in.TLS, &out.TLS
@@ -1231,31 +1248,6 @@ func (in *ReplicaSetStatus) DeepCopy() *ReplicaSetStatus {
 	return out
 }
 
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ReplicationControllerDummy) DeepCopyInto(out *ReplicationControllerDummy) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerDummy.
-func (in *ReplicationControllerDummy) DeepCopy() *ReplicationControllerDummy {
-	if in == nil {
-		return nil
-	}
-	out := new(ReplicationControllerDummy)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *ReplicationControllerDummy) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *RollbackConfig) DeepCopyInto(out *RollbackConfig) {
 	*out = *in
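
The deepcopy updates above follow from the new pointer fields: HTTPIngressPath.PathType and IngressBackend.Resource must be allocated anew rather than aliased, which is also why Backend is now copied with DeepCopyInto instead of plain struct assignment. A small illustrative check of the intended behavior, assuming the same vendored package:

package main

import (
	"fmt"

	extv1beta1 "k8s.io/api/extensions/v1beta1"
)

func main() {
	pt := extv1beta1.PathTypeExact
	orig := extv1beta1.HTTPIngressPath{Path: "/a", PathType: &pt}

	cp := orig.DeepCopy()                    // allocates a fresh *PathType instead of aliasing orig's
	*cp.PathType = extv1beta1.PathTypePrefix // mutate only the copy

	fmt.Println(*orig.PathType, *cp.PathType) // Exact Prefix
}
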
diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.pb.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.pb.go
index d44ec3c9..86c86120 100644
--- a/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.pb.go
+++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.pb.go
@@ -41,7 +41,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 func (m *FlowDistinguisherMethod) Reset()      { *m = FlowDistinguisherMethod{} }
 func (*FlowDistinguisherMethod) ProtoMessage() {}
@@ -5350,6 +5350,7 @@ func (m *UserSubject) Unmarshal(dAtA []byte) error {
 func skipGenerated(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
+	depth := 0
 	for iNdEx < l {
 		var wire uint64
 		for shift := uint(0); ; shift += 7 {
@@ -5381,10 +5382,8 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			return iNdEx, nil
 		case 1:
 			iNdEx += 8
-			return iNdEx, nil
 		case 2:
 			var length int
 			for shift := uint(0); ; shift += 7 {
@@ -5405,55 +5404,30 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 				return 0, ErrInvalidLengthGenerated
 			}
 			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthGenerated
-			}
-			return iNdEx, nil
 		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowGenerated
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipGenerated(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthGenerated
-				}
-			}
-			return iNdEx, nil
+			depth++
 		case 4:
-			return iNdEx, nil
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
 		case 5:
 			iNdEx += 4
-			return iNdEx, nil
 		default:
 			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
 		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
 	}
-	panic("unreachable")
+	return 0, io.ErrUnexpectedEOF
 }
 
 var (
-	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
 )
diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto b/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto
index 6134b5e6..b8054528 100644
--- a/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto
+++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto
@@ -84,7 +84,7 @@ message FlowSchemaList {
   optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
 
   // `items` is a list of FlowSchemas.
-  // +listType=set
+  // +listType=atomic
   repeated FlowSchema items = 2;
 }
 
@@ -97,8 +97,8 @@ message FlowSchemaSpec {
 
   // `matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen
   // FlowSchema is among those with the numerically lowest (which we take to be logically highest)
-  // MatchingPrecedence.  Each MatchingPrecedence value must be non-negative.
-  // Note that if the precedence is not specified or zero, it will be set to 1000 as default.
+  // MatchingPrecedence.  Each MatchingPrecedence value must be in the range [1,10000].
+  // Note that if the precedence is not specified, it will be set to 1000 as default.
   // +optional
   optional int32 matchingPrecedence = 2;
 
@@ -110,7 +110,7 @@ message FlowSchemaSpec {
   // `rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if
   // at least one member of rules matches the request.
   // if it is an empty slice, there will be no requests matching the FlowSchema.
-  // +listType=set
+  // +listType=atomic
   // +optional
   repeated PolicyRulesWithSubjects rules = 4;
 }
@@ -210,20 +210,20 @@ message PolicyRulesWithSubjects {
   // subjects is the list of normal user, serviceaccount, or group that this rule cares about.
   // There must be at least one member in this slice.
   // A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request.
-  // +listType=set
+  // +listType=atomic
   // Required.
   repeated Subject subjects = 1;
 
   // `resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the
   // target resource.
   // At least one of `resourceRules` and `nonResourceRules` has to be non-empty.
-  // +listType=set
+  // +listType=atomic
   // +optional
   repeated ResourcePolicyRule resourceRules = 2;
 
   // `nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb
   // and the target non-resource URL.
-  // +listType=set
+  // +listType=atomic
   // +optional
   repeated NonResourcePolicyRule nonResourceRules = 3;
 }
@@ -275,7 +275,7 @@ message PriorityLevelConfigurationList {
   optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
 
   // `items` is a list of request-priorities.
-  // +listType=set
+  // +listType=atomic
   repeated PriorityLevelConfiguration items = 2;
 }
 
diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go
index 41073bdc..16bcf819 100644
--- a/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go
+++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go
@@ -33,7 +33,10 @@ const (
 
 // System preset priority level names
 const (
-	PriorityLevelConfigurationNameExempt = "exempt"
+	PriorityLevelConfigurationNameExempt   = "exempt"
+	PriorityLevelConfigurationNameCatchAll = "catch-all"
+	FlowSchemaNameExempt                   = "exempt"
+	FlowSchemaNameCatchAll                 = "catch-all"
 )
 
 // Conditions
@@ -43,6 +46,11 @@ const (
 	PriorityLevelConfigurationConditionConcurrencyShared = "ConcurrencyShared"
 )
 
+// Constants used by api validation.
+const (
+	FlowSchemaMaxMatchingPrecedence int32 = 10000
+)
+
 // +genclient
 // +genclient:nonNamespaced
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -76,7 +84,7 @@ type FlowSchemaList struct {
 	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
 
 	// `items` is a list of FlowSchemas.
-	// +listType=set
+	// +listType=atomic
 	Items []FlowSchema `json:"items" protobuf:"bytes,2,rep,name=items"`
 }
 
@@ -88,8 +96,8 @@ type FlowSchemaSpec struct {
 	PriorityLevelConfiguration PriorityLevelConfigurationReference `json:"priorityLevelConfiguration" protobuf:"bytes,1,opt,name=priorityLevelConfiguration"`
 	// `matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen
 	// FlowSchema is among those with the numerically lowest (which we take to be logically highest)
-	// MatchingPrecedence.  Each MatchingPrecedence value must be non-negative.
-	// Note that if the precedence is not specified or zero, it will be set to 1000 as default.
+	// MatchingPrecedence.  Each MatchingPrecedence value must be in the range [1,10000].
+	// Note that if the precedence is not specified, it will be set to 1000 as default.
 	// +optional
 	MatchingPrecedence int32 `json:"matchingPrecedence" protobuf:"varint,2,opt,name=matchingPrecedence"`
 	// `distinguisherMethod` defines how to compute the flow distinguisher for requests that match this schema.
@@ -99,7 +107,7 @@ type FlowSchemaSpec struct {
 	// `rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if
 	// at least one member of rules matches the request.
 	// if it is an empty slice, there will be no requests matching the FlowSchema.
-	// +listType=set
+	// +listType=atomic
 	// +optional
 	Rules []PolicyRulesWithSubjects `json:"rules,omitempty" protobuf:"bytes,4,rep,name=rules"`
 }
@@ -144,18 +152,18 @@ type PolicyRulesWithSubjects struct {
 	// subjects is the list of normal user, serviceaccount, or group that this rule cares about.
 	// There must be at least one member in this slice.
 	// A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request.
-	// +listType=set
+	// +listType=atomic
 	// Required.
 	Subjects []Subject `json:"subjects" protobuf:"bytes,1,rep,name=subjects"`
 	// `resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the
 	// target resource.
 	// At least one of `resourceRules` and `nonResourceRules` has to be non-empty.
-	// +listType=set
+	// +listType=atomic
 	// +optional
 	ResourceRules []ResourcePolicyRule `json:"resourceRules,omitempty" protobuf:"bytes,2,opt,name=resourceRules"`
 	// `nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb
 	// and the target non-resource URL.
-	// +listType=set
+	// +listType=atomic
 	// +optional
 	NonResourceRules []NonResourcePolicyRule `json:"nonResourceRules,omitempty" protobuf:"bytes,3,opt,name=nonResourceRules"`
 }
@@ -342,7 +350,7 @@ type PriorityLevelConfigurationList struct {
 	// +optional
 	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
 	// `items` is a list of request-priorities.
-	// +listType=set
+	// +listType=atomic
 	Items []PriorityLevelConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"`
 }
 
diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go
index 08380a1b..ffbee2e3 100644
--- a/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go
@@ -73,7 +73,7 @@ func (FlowSchemaList) SwaggerDoc() map[string]string {
 var map_FlowSchemaSpec = map[string]string{
 	"":                           "FlowSchemaSpec describes how the FlowSchema's specification looks like.",
 	"priorityLevelConfiguration": "`priorityLevelConfiguration` should reference a PriorityLevelConfiguration in the cluster. If the reference cannot be resolved, the FlowSchema will be ignored and marked as invalid in its status. Required.",
-	"matchingPrecedence":         "`matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen FlowSchema is among those with the numerically lowest (which we take to be logically highest) MatchingPrecedence.  Each MatchingPrecedence value must be non-negative. Note that if the precedence is not specified or zero, it will be set to 1000 as default.",
+	"matchingPrecedence":         "`matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen FlowSchema is among those with the numerically lowest (which we take to be logically highest) MatchingPrecedence.  Each MatchingPrecedence value must be ranged in [1,10000]. Note that if the precedence is not specified, it will be set to 1000 as default.",
 	"distinguisherMethod":        "`distinguisherMethod` defines how to compute the flow distinguisher for requests that match this schema. `nil` specifies that the distinguisher is disabled and thus will always be the empty string.",
 	"rules":                      "`rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if at least one member of rules matches the request. if it is an empty slice, there will be no requests matching the FlowSchema.",
 }
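
For context on the precedence rule documented above, here is a minimal, illustrative Go sketch (not part of the vendored code) of choosing among matching FlowSchemas: the numerically lowest MatchingPrecedence wins, and an unspecified value is treated as the 1000 default. The simplified struct and helper names are assumptions for illustration only.

package main

import "fmt"

// flowSchema is a simplified stand-in for v1alpha1.FlowSchema, keeping only
// what is needed to illustrate precedence-based selection.
type flowSchema struct {
	name               string
	matchingPrecedence int32 // 0 means unspecified; treated as the 1000 default
}

// effectivePrecedence applies the documented default of 1000.
func effectivePrecedence(fs flowSchema) int32 {
	if fs.matchingPrecedence == 0 {
		return 1000
	}
	return fs.matchingPrecedence
}

// pickFlowSchema returns the matching schema with the numerically lowest
// (logically highest) MatchingPrecedence.
func pickFlowSchema(matching []flowSchema) (flowSchema, bool) {
	var best flowSchema
	found := false
	for _, fs := range matching {
		if !found || effectivePrecedence(fs) < effectivePrecedence(best) {
			best, found = fs, true
		}
	}
	return best, found
}

func main() {
	chosen, _ := pickFlowSchema([]flowSchema{
		{name: "catch-all"}, // precedence unspecified -> 1000
		{name: "leader-election", matchingPrecedence: 100},
	})
	fmt.Println(chosen.name) // leader-election
}
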
diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go
new file mode 100644
index 00000000..5db6d52d
--- /dev/null
+++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go
@@ -0,0 +1,23 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:protobuf-gen=package
+// +k8s:openapi-gen=true
+
+// +groupName=imagepolicy.k8s.io
+
+package v1alpha1 // import "k8s.io/api/imagepolicy/v1alpha1"
diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.pb.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.pb.go
new file mode 100644
index 00000000..e5688513
--- /dev/null
+++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.pb.go
@@ -0,0 +1,1386 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: k8s.io/kubernetes/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.proto
+
+package v1alpha1
+
+import (
+	fmt "fmt"
+
+	io "io"
+
+	proto "github.com/gogo/protobuf/proto"
+	github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+
+	math "math"
+	math_bits "math/bits"
+	reflect "reflect"
+	strings "strings"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+func (m *ImageReview) Reset()      { *m = ImageReview{} }
+func (*ImageReview) ProtoMessage() {}
+func (*ImageReview) Descriptor() ([]byte, []int) {
+	return fileDescriptor_834793af728657a5, []int{0}
+}
+func (m *ImageReview) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ImageReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ImageReview) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ImageReview.Merge(m, src)
+}
+func (m *ImageReview) XXX_Size() int {
+	return m.Size()
+}
+func (m *ImageReview) XXX_DiscardUnknown() {
+	xxx_messageInfo_ImageReview.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageReview proto.InternalMessageInfo
+
+func (m *ImageReviewContainerSpec) Reset()      { *m = ImageReviewContainerSpec{} }
+func (*ImageReviewContainerSpec) ProtoMessage() {}
+func (*ImageReviewContainerSpec) Descriptor() ([]byte, []int) {
+	return fileDescriptor_834793af728657a5, []int{1}
+}
+func (m *ImageReviewContainerSpec) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ImageReviewContainerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ImageReviewContainerSpec) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ImageReviewContainerSpec.Merge(m, src)
+}
+func (m *ImageReviewContainerSpec) XXX_Size() int {
+	return m.Size()
+}
+func (m *ImageReviewContainerSpec) XXX_DiscardUnknown() {
+	xxx_messageInfo_ImageReviewContainerSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageReviewContainerSpec proto.InternalMessageInfo
+
+func (m *ImageReviewSpec) Reset()      { *m = ImageReviewSpec{} }
+func (*ImageReviewSpec) ProtoMessage() {}
+func (*ImageReviewSpec) Descriptor() ([]byte, []int) {
+	return fileDescriptor_834793af728657a5, []int{2}
+}
+func (m *ImageReviewSpec) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ImageReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ImageReviewSpec) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ImageReviewSpec.Merge(m, src)
+}
+func (m *ImageReviewSpec) XXX_Size() int {
+	return m.Size()
+}
+func (m *ImageReviewSpec) XXX_DiscardUnknown() {
+	xxx_messageInfo_ImageReviewSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageReviewSpec proto.InternalMessageInfo
+
+func (m *ImageReviewStatus) Reset()      { *m = ImageReviewStatus{} }
+func (*ImageReviewStatus) ProtoMessage() {}
+func (*ImageReviewStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_834793af728657a5, []int{3}
+}
+func (m *ImageReviewStatus) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ImageReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ImageReviewStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ImageReviewStatus.Merge(m, src)
+}
+func (m *ImageReviewStatus) XXX_Size() int {
+	return m.Size()
+}
+func (m *ImageReviewStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_ImageReviewStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageReviewStatus proto.InternalMessageInfo
+
+func init() {
+	proto.RegisterType((*ImageReview)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReview")
+	proto.RegisterType((*ImageReviewContainerSpec)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReviewContainerSpec")
+	proto.RegisterType((*ImageReviewSpec)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReviewSpec")
+	proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReviewSpec.AnnotationsEntry")
+	proto.RegisterType((*ImageReviewStatus)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReviewStatus")
+	proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReviewStatus.AuditAnnotationsEntry")
+}
+
+func init() {
+	proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.proto", fileDescriptor_834793af728657a5)
+}
+
+var fileDescriptor_834793af728657a5 = []byte{
+	// 607 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x92, 0xcf, 0x6e, 0xd3, 0x4c,
+	0x14, 0xc5, 0xe3, 0xa4, 0xff, 0x32, 0xf9, 0x3e, 0x9a, 0x0e, 0x20, 0x59, 0x59, 0xb8, 0x55, 0x90,
+	0x50, 0x59, 0x30, 0x43, 0x2b, 0x84, 0x0a, 0x0b, 0x50, 0x5c, 0x21, 0x95, 0x05, 0x20, 0x0d, 0xbb,
+	0xae, 0x98, 0x38, 0x17, 0xc7, 0x24, 0x9e, 0xb1, 0x3c, 0xe3, 0x94, 0xec, 0x78, 0x02, 0xc4, 0x1b,
+	0xf0, 0x22, 0x3c, 0x40, 0x97, 0x5d, 0x76, 0x55, 0x51, 0xb3, 0xe4, 0x25, 0x90, 0xc7, 0x4e, 0x6c,
+	0x92, 0x22, 0x94, 0x9d, 0xef, 0xbd, 0x73, 0x7e, 0xf7, 0xcc, 0xf1, 0xa0, 0x93, 0xd1, 0x91, 0x22,
+	0x81, 0xa4, 0xa3, 0xa4, 0x0f, 0xb1, 0x00, 0x0d, 0x8a, 0x4e, 0x40, 0x0c, 0x64, 0x4c, 0x8b, 0x01,
+	0x8f, 0x02, 0x1a, 0x84, 0xdc, 0x87, 0x48, 0x8e, 0x03, 0x6f, 0x4a, 0x27, 0x07, 0x7c, 0x1c, 0x0d,
+	0xf9, 0x01, 0xf5, 0x41, 0x40, 0xcc, 0x35, 0x0c, 0x48, 0x14, 0x4b, 0x2d, 0xf1, 0x6e, 0x2e, 0x20,
+	0x3c, 0x0a, 0x48, 0x45, 0x40, 0x66, 0x82, 0xce, 0x43, 0x3f, 0xd0, 0xc3, 0xa4, 0x4f, 0x3c, 0x19,
+	0x52, 0x5f, 0xfa, 0x92, 0x1a, 0x5d, 0x3f, 0xf9, 0x60, 0x2a, 0x53, 0x98, 0xaf, 0x9c, 0xd7, 0x79,
+	0x5c, 0x1a, 0x08, 0xb9, 0x37, 0x0c, 0x04, 0xc4, 0x53, 0x1a, 0x8d, 0xfc, 0xac, 0xa1, 0x68, 0x08,
+	0x9a, 0xd3, 0xc9, 0x92, 0x8b, 0x0e, 0xfd, 0x9b, 0x2a, 0x4e, 0x84, 0x0e, 0x42, 0x58, 0x12, 0x3c,
+	0xf9, 0x97, 0x40, 0x79, 0x43, 0x08, 0xf9, 0xa2, 0xae, 0xfb, 0xad, 0x8e, 0x5a, 0xaf, 0xb2, 0x6b,
+	0x32, 0x98, 0x04, 0x70, 0x86, 0xdf, 0xa3, 0xad, 0xcc, 0xd3, 0x80, 0x6b, 0x6e, 0x5b, 0x7b, 0xd6,
+	0x7e, 0xeb, 0xf0, 0x11, 0x29, 0x13, 0x99, 0xa3, 0x49, 0x34, 0xf2, 0xb3, 0x86, 0x22, 0xd9, 0x69,
+	0x32, 0x39, 0x20, 0x6f, 0xfb, 0x1f, 0xc1, 0xd3, 0xaf, 0x41, 0x73, 0x17, 0x9f, 0x5f, 0xed, 0xd6,
+	0xd2, 0xab, 0x5d, 0x54, 0xf6, 0xd8, 0x9c, 0x8a, 0x19, 0x5a, 0x53, 0x11, 0x78, 0x76, 0x7d, 0x89,
+	0x7e, 0x63, 0xde, 0xa4, 0xe2, 0xee, 0x5d, 0x04, 0x9e, 0xfb, 0x5f, 0x41, 0x5f, 0xcb, 0x2a, 0x66,
+	0x58, 0xf8, 0x14, 0x6d, 0x28, 0xcd, 0x75, 0xa2, 0xec, 0x86, 0xa1, 0x1e, 0xae, 0x44, 0x35, 0x4a,
+	0xf7, 0x56, 0xc1, 0xdd, 0xc8, 0x6b, 0x56, 0x10, 0xbb, 0x2f, 0x90, 0x5d, 0x39, 0x7c, 0x2c, 0x85,
+	0xe6, 0x59, 0x04, 0xd9, 0x76, 0x7c, 0x0f, 0xad, 0x1b, 0xba, 0x89, 0xaa, 0xe9, 0xfe, 0x5f, 0x20,
+	0xd6, 0x73, 0x41, 0x3e, 0xeb, 0xfe, 0xaa, 0xa3, 0xed, 0x85, 0x4b, 0xe0, 0x10, 0x21, 0x6f, 0x46,
+	0x52, 0xb6, 0xb5, 0xd7, 0xd8, 0x6f, 0x1d, 0x3e, 0x5d, 0xc5, 0xf4, 0x1f, 0x3e, 0xca, 0xc4, 0xe7,
+	0x6d, 0xc5, 0x2a, 0x0b, 0xf0, 0x27, 0xd4, 0xe2, 0x42, 0x48, 0xcd, 0x75, 0x20, 0x85, 0xb2, 0xeb,
+	0x66, 0x5f, 0x6f, 0xd5, 0xe8, 0x49, 0xaf, 0x64, 0xbc, 0x14, 0x3a, 0x9e, 0xba, 0xb7, 0x8b, 0xbd,
+	0xad, 0xca, 0x84, 0x55, 0x57, 0x61, 0x8a, 0x9a, 0x82, 0x87, 0xa0, 0x22, 0xee, 0x81, 0xf9, 0x39,
+	0x4d, 0x77, 0xa7, 0x10, 0x35, 0xdf, 0xcc, 0x06, 0xac, 0x3c, 0xd3, 0x79, 0x8e, 0xda, 0x8b, 0x6b,
+	0x70, 0x1b, 0x35, 0x46, 0x30, 0xcd, 0x43, 0x66, 0xd9, 0x27, 0xbe, 0x83, 0xd6, 0x27, 0x7c, 0x9c,
+	0x80, 0x79, 0x45, 0x4d, 0x96, 0x17, 0xcf, 0xea, 0x47, 0x56, 0xf7, 0x7b, 0x1d, 0xed, 0x2c, 0xfd,
+	0x5c, 0xfc, 0x00, 0x6d, 0xf2, 0xf1, 0x58, 0x9e, 0xc1, 0xc0, 0x50, 0xb6, 0xdc, 0xed, 0xc2, 0xc4,
+	0x66, 0x2f, 0x6f, 0xb3, 0xd9, 0x1c, 0xdf, 0x47, 0x1b, 0x31, 0x70, 0x25, 0x45, 0xce, 0x2e, 0xdf,
+	0x05, 0x33, 0x5d, 0x56, 0x4c, 0xf1, 0x17, 0x0b, 0xb5, 0x79, 0x32, 0x08, 0x74, 0xc5, 0xae, 0xdd,
+	0x30, 0xc9, 0x9e, 0xac, 0xfe, 0xfc, 0x48, 0x6f, 0x01, 0x95, 0x07, 0x6c, 0x17, 0xcb, 0xdb, 0x8b,
+	0x63, 0xb6, 0xb4, 0xbb, 0x73, 0x8c, 0xee, 0xde, 0x08, 0x59, 0x25, 0x3e, 0x97, 0x9c, 0x5f, 0x3b,
+	0xb5, 0x8b, 0x6b, 0xa7, 0x76, 0x79, 0xed, 0xd4, 0x3e, 0xa7, 0x8e, 0x75, 0x9e, 0x3a, 0xd6, 0x45,
+	0xea, 0x58, 0x97, 0xa9, 0x63, 0xfd, 0x48, 0x1d, 0xeb, 0xeb, 0x4f, 0xa7, 0x76, 0xba, 0x35, 0xbb,
+	0xc8, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x44, 0x16, 0x48, 0xa2, 0x79, 0x05, 0x00, 0x00,
+}
+
+func (m *ImageReview) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageReview) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageReview) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	{
+		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageReviewContainerSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageReviewContainerSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageReviewContainerSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Image)
+	copy(dAtA[i:], m.Image)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageReviewSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageReviewSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Namespace)
+	copy(dAtA[i:], m.Namespace)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
+	i--
+	dAtA[i] = 0x1a
+	if len(m.Annotations) > 0 {
+		keysForAnnotations := make([]string, 0, len(m.Annotations))
+		for k := range m.Annotations {
+			keysForAnnotations = append(keysForAnnotations, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations)
+		for iNdEx := len(keysForAnnotations) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.Annotations[string(keysForAnnotations[iNdEx])]
+			baseI := i
+			i -= len(v)
+			copy(dAtA[i:], v)
+			i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForAnnotations[iNdEx])
+			copy(dAtA[i:], keysForAnnotations[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotations[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if len(m.Containers) > 0 {
+		for iNdEx := len(m.Containers) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Containers[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *ImageReviewStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageReviewStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ImageReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.AuditAnnotations) > 0 {
+		keysForAuditAnnotations := make([]string, 0, len(m.AuditAnnotations))
+		for k := range m.AuditAnnotations {
+			keysForAuditAnnotations = append(keysForAuditAnnotations, string(k))
+		}
+		github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations)
+		for iNdEx := len(keysForAuditAnnotations) - 1; iNdEx >= 0; iNdEx-- {
+			v := m.AuditAnnotations[string(keysForAuditAnnotations[iNdEx])]
+			baseI := i
+			i -= len(v)
+			copy(dAtA[i:], v)
+			i = encodeVarintGenerated(dAtA, i, uint64(len(v)))
+			i--
+			dAtA[i] = 0x12
+			i -= len(keysForAuditAnnotations[iNdEx])
+			copy(dAtA[i:], keysForAuditAnnotations[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAuditAnnotations[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+			i = encodeVarintGenerated(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	i -= len(m.Reason)
+	copy(dAtA[i:], m.Reason)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+	i--
+	dAtA[i] = 0x12
+	i--
+	if m.Allowed {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+	offset -= sovGenerated(v)
+	base := offset
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return base
+}
+func (m *ImageReview) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Spec.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Status.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *ImageReviewContainerSpec) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Image)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *ImageReviewSpec) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Containers) > 0 {
+		for _, e := range m.Containers {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.Annotations) > 0 {
+		for k, v := range m.Annotations {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	l = len(m.Namespace)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *ImageReviewStatus) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 2
+	l = len(m.Reason)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.AuditAnnotations) > 0 {
+		for k, v := range m.AuditAnnotations {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	return n
+}
+
+func sovGenerated(x uint64) (n int) {
+	return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+	return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *ImageReview) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ImageReview{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ImageReviewSpec", "ImageReviewSpec", 1), `&`, ``, 1) + `,`,
+		`Status:` + strings.Replace(strings.Replace(this.Status.String(), "ImageReviewStatus", "ImageReviewStatus", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ImageReviewContainerSpec) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ImageReviewContainerSpec{`,
+		`Image:` + fmt.Sprintf("%v", this.Image) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ImageReviewSpec) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForContainers := "[]ImageReviewContainerSpec{"
+	for _, f := range this.Containers {
+		repeatedStringForContainers += strings.Replace(strings.Replace(f.String(), "ImageReviewContainerSpec", "ImageReviewContainerSpec", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForContainers += "}"
+	keysForAnnotations := make([]string, 0, len(this.Annotations))
+	for k := range this.Annotations {
+		keysForAnnotations = append(keysForAnnotations, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations)
+	mapStringForAnnotations := "map[string]string{"
+	for _, k := range keysForAnnotations {
+		mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k])
+	}
+	mapStringForAnnotations += "}"
+	s := strings.Join([]string{`&ImageReviewSpec{`,
+		`Containers:` + repeatedStringForContainers + `,`,
+		`Annotations:` + mapStringForAnnotations + `,`,
+		`Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ImageReviewStatus) String() string {
+	if this == nil {
+		return "nil"
+	}
+	keysForAuditAnnotations := make([]string, 0, len(this.AuditAnnotations))
+	for k := range this.AuditAnnotations {
+		keysForAuditAnnotations = append(keysForAuditAnnotations, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations)
+	mapStringForAuditAnnotations := "map[string]string{"
+	for _, k := range keysForAuditAnnotations {
+		mapStringForAuditAnnotations += fmt.Sprintf("%v: %v,", k, this.AuditAnnotations[k])
+	}
+	mapStringForAuditAnnotations += "}"
+	s := strings.Join([]string{`&ImageReviewStatus{`,
+		`Allowed:` + fmt.Sprintf("%v", this.Allowed) + `,`,
+		`Reason:` + fmt.Sprintf("%v", this.Reason) + `,`,
+		`AuditAnnotations:` + mapStringForAuditAnnotations + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func valueToStringGenerated(v interface{}) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("*%v", pv)
+}
+func (m *ImageReview) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ImageReview: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ImageReview: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ImageReviewContainerSpec) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ImageReviewContainerSpec: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ImageReviewContainerSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Image = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ImageReviewSpec) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ImageReviewSpec: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ImageReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Containers", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Containers = append(m.Containers, ImageReviewContainerSpec{})
+			if err := m.Containers[len(m.Containers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Annotations == nil {
+				m.Annotations = make(map[string]string)
+			}
+			var mapkey string
+			var mapvalue string
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var stringLenmapvalue uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapvalue |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapvalue := int(stringLenmapvalue)
+					if intStringLenmapvalue < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapvalue > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+					iNdEx = postStringIndexmapvalue
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if skippy < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.Annotations[mapkey] = mapvalue
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Namespace = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ImageReviewStatus) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ImageReviewStatus: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ImageReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Allowed", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Allowed = bool(v != 0)
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Reason = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuditAnnotations", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuditAnnotations == nil {
+				m.AuditAnnotations = make(map[string]string)
+			}
+			var mapkey string
+			var mapvalue string
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var stringLenmapvalue uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapvalue |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapvalue := int(stringLenmapvalue)
+					if intStringLenmapvalue < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapvalue > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+					iNdEx = postStringIndexmapvalue
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if skippy < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.AuditAnnotations[mapkey] = mapvalue
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipGenerated(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	depth := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+		case 1:
+			iNdEx += 8
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthGenerated
+			}
+			iNdEx += length
+		case 3:
+			depth++
+		case 4:
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
+		case 5:
+			iNdEx += 4
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
+	}
+	return 0, io.ErrUnexpectedEOF
+}
+
+var (
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.proto b/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.proto
new file mode 100644
index 00000000..381d0091
--- /dev/null
+++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.proto
@@ -0,0 +1,86 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.api.imagepolicy.v1alpha1;
+
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1alpha1";
+
+// ImageReview checks if the set of images in a pod are allowed.
+message ImageReview {
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec holds information about the pod being evaluated
+  optional ImageReviewSpec spec = 2;
+
+  // Status is filled in by the backend and indicates whether the pod should be allowed.
+  // +optional
+  optional ImageReviewStatus status = 3;
+}
+
+// ImageReviewContainerSpec is a description of a container within the pod creation request.
+message ImageReviewContainerSpec {
+  // This can be in the form image:tag or image@SHA:012345679abcdef.
+  // +optional
+  optional string image = 1;
+}
+
+// ImageReviewSpec is a description of the pod creation request.
+message ImageReviewSpec {
+  // Containers is a list of a subset of the information in each container of the Pod being created.
+  // +optional
+  repeated ImageReviewContainerSpec containers = 1;
+
+  // Annotations is a list of key-value pairs extracted from the Pod's annotations.
+  // It only includes keys which match the pattern `*.image-policy.k8s.io/*`.
+  // It is up to each webhook backend to determine how to interpret these annotations, if at all.
+  // +optional
+  map<string, string> annotations = 2;
+
+  // Namespace is the namespace the pod is being created in.
+  // +optional
+  optional string namespace = 3;
+}
+
+// ImageReviewStatus is the result of the review for the pod creation request.
+message ImageReviewStatus {
+  // Allowed indicates that all images were allowed to be run.
+  optional bool allowed = 1;
+
+  // Reason should be empty unless Allowed is false in which case it
+  // may contain a short description of what is wrong.  Kubernetes
+  // may truncate excessively long errors when displaying to the user.
+  // +optional
+  optional string reason = 2;
+
+  // AuditAnnotations will be added to the attributes object of the
+  // admission controller request using 'AddAnnotation'.  The keys should
+  // be prefix-less (i.e., the admission controller will add an
+  // appropriate prefix).
+  // +optional
+  map<string, string> auditAnnotations = 3;
+}
+
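
As a rough usage sketch of the ImageReview API introduced above (the registry name and annotation keys are hypothetical; only the vendored types and import paths come from this patch), a caller populates the Spec and the webhook backend fills in the Status:

package main

import (
	"fmt"

	imagepolicy "k8s.io/api/imagepolicy/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Request side: one entry per container image plus any
	// *.image-policy.k8s.io/* annotations copied from the Pod.
	review := imagepolicy.ImageReview{
		ObjectMeta: metav1.ObjectMeta{Name: "example"},
		Spec: imagepolicy.ImageReviewSpec{
			Containers: []imagepolicy.ImageReviewContainerSpec{
				{Image: "registry.example.com/app:v1.2.3"},
			},
			Annotations: map[string]string{
				"mycontroller.image-policy.k8s.io/break-glass": "true",
			},
			Namespace: "default",
		},
	}

	// Response side: the backend sets Allowed, an optional Reason when the
	// request is denied, and prefix-less audit annotation keys.
	review.Status = imagepolicy.ImageReviewStatus{
		Allowed: false,
		Reason:  "image is not from an approved registry",
		AuditAnnotations: map[string]string{
			"failed-open": "false",
		},
	}

	fmt.Printf("%s allowed=%t\n", review.Spec.Containers[0].Image, review.Status.Allowed)
}
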
diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/register.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/register.go
new file mode 100644
index 00000000..477571bb
--- /dev/null
+++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/register.go
@@ -0,0 +1,51 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name for this API.
+const GroupName = "imagepolicy.k8s.io"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+	SchemeBuilder      = runtime.NewSchemeBuilder(addKnownTypes)
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&ImageReview{},
+	)
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
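
A short sketch of how the registration helpers above are typically consumed (the surrounding program is hypothetical; only AddToScheme and the vendored types come from this patch): callers add the group/version's types to a runtime.Scheme before encoding or decoding ImageReview objects.

package main

import (
	"fmt"

	imagepolicy "k8s.io/api/imagepolicy/v1alpha1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()

	// Register ImageReview (and the meta/v1 types) under imagepolicy.k8s.io/v1alpha1.
	if err := imagepolicy.AddToScheme(scheme); err != nil {
		panic(err)
	}

	// The scheme can now resolve the registered kind for this group/version.
	kinds, _, err := scheme.ObjectKinds(&imagepolicy.ImageReview{})
	if err != nil {
		panic(err)
	}
	fmt.Println(kinds[0].String()) // imagepolicy.k8s.io/v1alpha1, Kind=ImageReview
}
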
diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/types.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/types.go
new file mode 100644
index 00000000..fd689e63
--- /dev/null
+++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/types.go
@@ -0,0 +1,80 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +genclient:nonNamespaced
+// +genclient:noVerbs
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImageReview checks if the set of images in a pod are allowed.
+type ImageReview struct {
+	metav1.TypeMeta `json:",inline"`
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Spec holds information about the pod being evaluated
+	Spec ImageReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+
+	// Status is filled in by the backend and indicates whether the pod should be allowed.
+	// +optional
+	Status ImageReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// ImageReviewSpec is a description of the pod creation request.
+type ImageReviewSpec struct {
+	// Containers is a list of a subset of the information in each container of the Pod being created.
+	// +optional
+	Containers []ImageReviewContainerSpec `json:"containers,omitempty" protobuf:"bytes,1,rep,name=containers"`
+	// Annotations is a list of key-value pairs extracted from the Pod's annotations.
+	// It only includes keys which match the pattern `*.image-policy.k8s.io/*`.
+	// It is up to each webhook backend to determine how to interpret these annotations, if at all.
+	// +optional
+	Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,2,rep,name=annotations"`
+	// Namespace is the namespace the pod is being created in.
+	// +optional
+	Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"`
+}
+
+// ImageReviewContainerSpec is a description of a container within the pod creation request.
+type ImageReviewContainerSpec struct {
+	// This can be in the form image:tag or image@SHA:012345679abcdef.
+	// +optional
+	Image string `json:"image,omitempty" protobuf:"bytes,1,opt,name=image"`
+	// In future, we may add command line overrides, exec health check command lines, and so on.
+}
+
+// ImageReviewStatus is the result of the review for the pod creation request.
+type ImageReviewStatus struct {
+	// Allowed indicates that all images were allowed to be run.
+	Allowed bool `json:"allowed" protobuf:"varint,1,opt,name=allowed"`
+	// Reason should be empty unless Allowed is false in which case it
+	// may contain a short description of what is wrong.  Kubernetes
+	// may truncate excessively long errors when displaying to the user.
+	// +optional
+	Reason string `json:"reason,omitempty" protobuf:"bytes,2,opt,name=reason"`
+	// AuditAnnotations will be added to the attributes object of the
+	// admission controller request using 'AddAnnotation'.  The keys should
+	// be prefix-less (i.e., the admission controller will add an
+	// appropriate prefix).
+	// +optional
+	AuditAnnotations map[string]string `json:"auditAnnotations,omitempty" protobuf:"bytes,3,rep,name=auditAnnotations"`
+}
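
To connect these types to their intended use, here is a hedged sketch of a backend handler (the endpoint path, policy, and HTTP wiring are assumptions, not part of this patch): it decodes the posted ImageReview, evaluates the images, and writes the same object back with Status filled in.

package main

import (
	"encoding/json"
	"net/http"
	"strings"

	imagepolicy "k8s.io/api/imagepolicy/v1alpha1"
)

// reviewHandler is a minimal image-policy webhook backend.
func reviewHandler(w http.ResponseWriter, r *http.Request) {
	var review imagepolicy.ImageReview
	if err := json.NewDecoder(r.Body).Decode(&review); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	// Example policy: only images from an approved registry are allowed.
	review.Status = imagepolicy.ImageReviewStatus{Allowed: true}
	for _, c := range review.Spec.Containers {
		if !strings.HasPrefix(c.Image, "registry.example.com/") {
			review.Status.Allowed = false
			review.Status.Reason = "image " + c.Image + " is not from an approved registry"
			break
		}
	}

	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(&review)
}

func main() {
	http.HandleFunc("/review", reviewHandler)
	http.ListenAndServe(":8443", nil) // a real deployment would serve TLS
}
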
diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/types_swagger_doc_generated.go
new file mode 100644
index 00000000..0211d94a
--- /dev/null
+++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/types_swagger_doc_generated.go
@@ -0,0 +1,71 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored by the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple lines or blocks that you want to ignore, use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_ImageReview = map[string]string{
+	"":       "ImageReview checks if the set of images in a pod are allowed.",
+	"spec":   "Spec holds information about the pod being evaluated",
+	"status": "Status is filled in by the backend and indicates whether the pod should be allowed.",
+}
+
+func (ImageReview) SwaggerDoc() map[string]string {
+	return map_ImageReview
+}
+
+var map_ImageReviewContainerSpec = map[string]string{
+	"":      "ImageReviewContainerSpec is a description of a container within the pod creation request.",
+	"image": "This can be in the form image:tag or image@SHA:012345679abcdef.",
+}
+
+func (ImageReviewContainerSpec) SwaggerDoc() map[string]string {
+	return map_ImageReviewContainerSpec
+}
+
+var map_ImageReviewSpec = map[string]string{
+	"":            "ImageReviewSpec is a description of the pod creation request.",
+	"containers":  "Containers is a list of a subset of the information in each container of the Pod being created.",
+	"annotations": "Annotations is a list of key-value pairs extracted from the Pod's annotations. It only includes keys which match the pattern `*.image-policy.k8s.io/*`. It is up to each webhook backend to determine how to interpret these annotations, if at all.",
+	"namespace":   "Namespace is the namespace the pod is being created in.",
+}
+
+func (ImageReviewSpec) SwaggerDoc() map[string]string {
+	return map_ImageReviewSpec
+}
+
+var map_ImageReviewStatus = map[string]string{
+	"":                 "ImageReviewStatus is the result of the review for the pod creation request.",
+	"allowed":          "Allowed indicates that all images were allowed to be run.",
+	"reason":           "Reason should be empty unless Allowed is false in which case it may contain a short description of what is wrong.  Kubernetes may truncate excessively long errors when displaying to the user.",
+	"auditAnnotations": "AuditAnnotations will be added to the attributes object of the admission controller request using 'AddAnnotation'.  The keys should be prefix-less (i.e., the admission controller will add an appropriate prefix).",
+}
+
+func (ImageReviewStatus) SwaggerDoc() map[string]string {
+	return map_ImageReviewStatus
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
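
The generated SwaggerDoc maps above are consumed by documentation tooling; a trivial, purely illustrative sketch of reading one field description:

package main

import (
	"fmt"

	imagepolicy "k8s.io/api/imagepolicy/v1alpha1"
)

func main() {
	docs := imagepolicy.ImageReviewSpec{}.SwaggerDoc()
	fmt.Println(docs["containers"]) // field-level documentation string
}
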
diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 00000000..83d47b79
--- /dev/null
+++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,120 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageReview) DeepCopyInto(out *ImageReview) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageReview.
+func (in *ImageReview) DeepCopy() *ImageReview {
+	if in == nil {
+		return nil
+	}
+	out := new(ImageReview)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ImageReview) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageReviewContainerSpec) DeepCopyInto(out *ImageReviewContainerSpec) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageReviewContainerSpec.
+func (in *ImageReviewContainerSpec) DeepCopy() *ImageReviewContainerSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ImageReviewContainerSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageReviewSpec) DeepCopyInto(out *ImageReviewSpec) {
+	*out = *in
+	if in.Containers != nil {
+		in, out := &in.Containers, &out.Containers
+		*out = make([]ImageReviewContainerSpec, len(*in))
+		copy(*out, *in)
+	}
+	if in.Annotations != nil {
+		in, out := &in.Annotations, &out.Annotations
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageReviewSpec.
+func (in *ImageReviewSpec) DeepCopy() *ImageReviewSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ImageReviewSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageReviewStatus) DeepCopyInto(out *ImageReviewStatus) {
+	*out = *in
+	if in.AuditAnnotations != nil {
+		in, out := &in.AuditAnnotations, &out.AuditAnnotations
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageReviewStatus.
+func (in *ImageReviewStatus) DeepCopy() *ImageReviewStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ImageReviewStatus)
+	in.DeepCopyInto(out)
+	return out
+}
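These deepcopy-gen helpers exist so that reference-typed fields (the Containers slice and the Annotations/AuditAnnotations maps) are duplicated rather than aliased. A minimal usage sketch, assuming only the vendored package path:

```go
package main

import (
	"fmt"

	"k8s.io/api/imagepolicy/v1alpha1"
)

func main() {
	orig := &v1alpha1.ImageReviewSpec{
		Annotations: map[string]string{"team": "payments"},
	}
	// DeepCopy allocates a fresh Annotations map, so mutating the copy
	// does not leak back into the original object.
	dup := orig.DeepCopy()
	dup.Annotations["team"] = "platform"
	fmt.Println(orig.Annotations["team"], dup.Annotations["team"]) // payments platform
}
```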
diff --git a/vendor/k8s.io/api/networking/v1/generated.pb.go b/vendor/k8s.io/api/networking/v1/generated.pb.go
index c9b22fc7..1ff2339b 100644
--- a/vendor/k8s.io/api/networking/v1/generated.pb.go
+++ b/vendor/k8s.io/api/networking/v1/generated.pb.go
@@ -46,7 +46,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 func (m *IPBlock) Reset()      { *m = IPBlock{} }
 func (*IPBlock) ProtoMessage() {}
@@ -2119,6 +2119,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error {
 func skipGenerated(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
+	depth := 0
 	for iNdEx < l {
 		var wire uint64
 		for shift := uint(0); ; shift += 7 {
@@ -2150,10 +2151,8 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			return iNdEx, nil
 		case 1:
 			iNdEx += 8
-			return iNdEx, nil
 		case 2:
 			var length int
 			for shift := uint(0); ; shift += 7 {
@@ -2174,55 +2173,30 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 				return 0, ErrInvalidLengthGenerated
 			}
 			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthGenerated
-			}
-			return iNdEx, nil
 		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowGenerated
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipGenerated(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthGenerated
-				}
-			}
-			return iNdEx, nil
+			depth++
 		case 4:
-			return iNdEx, nil
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
 		case 5:
 			iNdEx += 4
-			return iNdEx, nil
 		default:
 			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
 		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
 	}
-	panic("unreachable")
+	return 0, io.ErrUnexpectedEOF
 }
 
 var (
-	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
 )
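The regenerated skipGenerated no longer recurses into group-encoded fields; it tracks start-group/end-group markers with a depth counter and only returns once the field being skipped is closed at depth zero. A simplified, self-contained sketch of that idea, operating on plain wire-type tokens rather than real protobuf bytes (the token encoding is an assumption for illustration):

```go
package main

import "fmt"

// skip consumes wire-type tokens starting at index i and returns the index
// just past the field, mirroring the depth-based approach above.
// 3 = start group, 4 = end group; any other value stands for a scalar field.
func skip(types []int, i int) (int, error) {
	depth := 0
	for i < len(types) {
		switch types[i] {
		case 3:
			depth++
		case 4:
			if depth == 0 {
				return 0, fmt.Errorf("unexpected end of group")
			}
			depth--
		}
		i++
		if depth == 0 {
			return i, nil
		}
	}
	return 0, fmt.Errorf("unexpected EOF")
}

func main() {
	// A group containing a nested group (3 3 0 4 4), then a trailing field.
	fmt.Println(skip([]int{3, 3, 0, 4, 4, 0}, 0)) // 5 <nil>
}
```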
diff --git a/vendor/k8s.io/api/networking/v1/generated.proto b/vendor/k8s.io/api/networking/v1/generated.proto
index 3cb73804..f2aa9690 100644
--- a/vendor/k8s.io/api/networking/v1/generated.proto
+++ b/vendor/k8s.io/api/networking/v1/generated.proto
@@ -30,16 +30,16 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto";
 // Package-wide variables from generator "generated".
 option go_package = "v1";
 
-// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24") that is allowed to the pods
-// matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should
-// not be included within this rule.
+// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24","2001:db9::/64") that is allowed
+// to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs
+// that should not be included within this rule.
 message IPBlock {
   // CIDR is a string representing the IP Block
-  // Valid examples are "192.168.1.1/24"
+  // Valid examples are "192.168.1.1/24" or "2001:db9::/64"
   optional string cidr = 1;
 
   // Except is a slice of CIDRs that should not be included within an IP Block
-  // Valid examples are "192.168.1.1/24"
+  // Valid examples are "192.168.1.1/24" or "2001:db9::/64"
   // Except values will be rejected if they are outside the CIDR range
   // +optional
   repeated string except = 2;
diff --git a/vendor/k8s.io/api/networking/v1/types.go b/vendor/k8s.io/api/networking/v1/types.go
index 38a640f0..73580a50 100644
--- a/vendor/k8s.io/api/networking/v1/types.go
+++ b/vendor/k8s.io/api/networking/v1/types.go
@@ -147,15 +147,15 @@ type NetworkPolicyPort struct {
 	Port *intstr.IntOrString `json:"port,omitempty" protobuf:"bytes,2,opt,name=port"`
 }
 
-// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24") that is allowed to the pods
-// matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should
-// not be included within this rule.
+// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24","2001:db9::/64") that is allowed
+// to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs
+// that should not be included within this rule.
 type IPBlock struct {
 	// CIDR is a string representing the IP Block
-	// Valid examples are "192.168.1.1/24"
+	// Valid examples are "192.168.1.1/24" or "2001:db9::/64"
 	CIDR string `json:"cidr" protobuf:"bytes,1,name=cidr"`
 	// Except is a slice of CIDRs that should not be included within an IP Block
-	// Valid examples are "192.168.1.1/24"
+	// Valid examples are "192.168.1.1/24" or "2001:db9::/64"
 	// Except values will be rejected if they are outside the CIDR range
 	// +optional
 	Except []string `json:"except,omitempty" protobuf:"bytes,2,rep,name=except"`
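The documentation change here only acknowledges IPv6 CIDRs alongside IPv4 ones. Both of the documented example values parse with the standard library, which is one way a caller might sanity-check input before placing it in an IPBlock; this check is not part of the vendored API:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// Both example values from the updated IPBlock comments are valid CIDRs.
	for _, cidr := range []string{"192.168.1.1/24", "2001:db9::/64"} {
		_, ipNet, err := net.ParseCIDR(cidr)
		if err != nil {
			fmt.Println(cidr, "is invalid:", err)
			continue
		}
		fmt.Println(cidr, "covers network", ipNet)
	}
}
```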
diff --git a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go
index 188b72a1..b404e5b1 100644
--- a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go
@@ -28,9 +28,9 @@ package v1
 
 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
 var map_IPBlock = map[string]string{
-	"":       "IPBlock describes a particular CIDR (Ex. \"192.168.1.1/24\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.",
-	"cidr":   "CIDR is a string representing the IP Block Valid examples are \"192.168.1.1/24\"",
-	"except": "Except is a slice of CIDRs that should not be included within an IP Block Valid examples are \"192.168.1.1/24\" Except values will be rejected if they are outside the CIDR range",
+	"":       "IPBlock describes a particular CIDR (Ex. \"192.168.1.1/24\",\"2001:db9::/64\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.",
+	"cidr":   "CIDR is a string representing the IP Block Valid examples are \"192.168.1.1/24\" or \"2001:db9::/64\"",
+	"except": "Except is a slice of CIDRs that should not be included within an IP Block Valid examples are \"192.168.1.1/24\" or \"2001:db9::/64\" Except values will be rejected if they are outside the CIDR range",
 }
 
 func (IPBlock) SwaggerDoc() map[string]string {
diff --git a/vendor/k8s.io/api/networking/v1beta1/generated.pb.go b/vendor/k8s.io/api/networking/v1beta1/generated.pb.go
index 8ed56009..6f51df86 100644
--- a/vendor/k8s.io/api/networking/v1beta1/generated.pb.go
+++ b/vendor/k8s.io/api/networking/v1beta1/generated.pb.go
@@ -25,6 +25,7 @@ import (
 	io "io"
 
 	proto "github.com/gogo/protobuf/proto"
+	v11 "k8s.io/api/core/v1"
 
 	math "math"
 	math_bits "math/bits"
@@ -41,7 +42,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 func (m *HTTPIngressPath) Reset()      { *m = HTTPIngressPath{} }
 func (*HTTPIngressPath) ProtoMessage() {}
@@ -155,10 +156,94 @@ func (m *IngressBackend) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_IngressBackend proto.InternalMessageInfo
 
+func (m *IngressClass) Reset()      { *m = IngressClass{} }
+func (*IngressClass) ProtoMessage() {}
+func (*IngressClass) Descriptor() ([]byte, []int) {
+	return fileDescriptor_5bea11de0ceb8f53, []int{4}
+}
+func (m *IngressClass) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *IngressClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *IngressClass) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_IngressClass.Merge(m, src)
+}
+func (m *IngressClass) XXX_Size() int {
+	return m.Size()
+}
+func (m *IngressClass) XXX_DiscardUnknown() {
+	xxx_messageInfo_IngressClass.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IngressClass proto.InternalMessageInfo
+
+func (m *IngressClassList) Reset()      { *m = IngressClassList{} }
+func (*IngressClassList) ProtoMessage() {}
+func (*IngressClassList) Descriptor() ([]byte, []int) {
+	return fileDescriptor_5bea11de0ceb8f53, []int{5}
+}
+func (m *IngressClassList) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *IngressClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *IngressClassList) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_IngressClassList.Merge(m, src)
+}
+func (m *IngressClassList) XXX_Size() int {
+	return m.Size()
+}
+func (m *IngressClassList) XXX_DiscardUnknown() {
+	xxx_messageInfo_IngressClassList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IngressClassList proto.InternalMessageInfo
+
+func (m *IngressClassSpec) Reset()      { *m = IngressClassSpec{} }
+func (*IngressClassSpec) ProtoMessage() {}
+func (*IngressClassSpec) Descriptor() ([]byte, []int) {
+	return fileDescriptor_5bea11de0ceb8f53, []int{6}
+}
+func (m *IngressClassSpec) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *IngressClassSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *IngressClassSpec) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_IngressClassSpec.Merge(m, src)
+}
+func (m *IngressClassSpec) XXX_Size() int {
+	return m.Size()
+}
+func (m *IngressClassSpec) XXX_DiscardUnknown() {
+	xxx_messageInfo_IngressClassSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IngressClassSpec proto.InternalMessageInfo
+
 func (m *IngressList) Reset()      { *m = IngressList{} }
 func (*IngressList) ProtoMessage() {}
 func (*IngressList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_5bea11de0ceb8f53, []int{4}
+	return fileDescriptor_5bea11de0ceb8f53, []int{7}
 }
 func (m *IngressList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -186,7 +271,7 @@ var xxx_messageInfo_IngressList proto.InternalMessageInfo
 func (m *IngressRule) Reset()      { *m = IngressRule{} }
 func (*IngressRule) ProtoMessage() {}
 func (*IngressRule) Descriptor() ([]byte, []int) {
-	return fileDescriptor_5bea11de0ceb8f53, []int{5}
+	return fileDescriptor_5bea11de0ceb8f53, []int{8}
 }
 func (m *IngressRule) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -214,7 +299,7 @@ var xxx_messageInfo_IngressRule proto.InternalMessageInfo
 func (m *IngressRuleValue) Reset()      { *m = IngressRuleValue{} }
 func (*IngressRuleValue) ProtoMessage() {}
 func (*IngressRuleValue) Descriptor() ([]byte, []int) {
-	return fileDescriptor_5bea11de0ceb8f53, []int{6}
+	return fileDescriptor_5bea11de0ceb8f53, []int{9}
 }
 func (m *IngressRuleValue) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -242,7 +327,7 @@ var xxx_messageInfo_IngressRuleValue proto.InternalMessageInfo
 func (m *IngressSpec) Reset()      { *m = IngressSpec{} }
 func (*IngressSpec) ProtoMessage() {}
 func (*IngressSpec) Descriptor() ([]byte, []int) {
-	return fileDescriptor_5bea11de0ceb8f53, []int{7}
+	return fileDescriptor_5bea11de0ceb8f53, []int{10}
 }
 func (m *IngressSpec) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -270,7 +355,7 @@ var xxx_messageInfo_IngressSpec proto.InternalMessageInfo
 func (m *IngressStatus) Reset()      { *m = IngressStatus{} }
 func (*IngressStatus) ProtoMessage() {}
 func (*IngressStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_5bea11de0ceb8f53, []int{8}
+	return fileDescriptor_5bea11de0ceb8f53, []int{11}
 }
 func (m *IngressStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -298,7 +383,7 @@ var xxx_messageInfo_IngressStatus proto.InternalMessageInfo
 func (m *IngressTLS) Reset()      { *m = IngressTLS{} }
 func (*IngressTLS) ProtoMessage() {}
 func (*IngressTLS) Descriptor() ([]byte, []int) {
-	return fileDescriptor_5bea11de0ceb8f53, []int{9}
+	return fileDescriptor_5bea11de0ceb8f53, []int{12}
 }
 func (m *IngressTLS) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -328,6 +413,9 @@ func init() {
 	proto.RegisterType((*HTTPIngressRuleValue)(nil), "k8s.io.api.networking.v1beta1.HTTPIngressRuleValue")
 	proto.RegisterType((*Ingress)(nil), "k8s.io.api.networking.v1beta1.Ingress")
 	proto.RegisterType((*IngressBackend)(nil), "k8s.io.api.networking.v1beta1.IngressBackend")
+	proto.RegisterType((*IngressClass)(nil), "k8s.io.api.networking.v1beta1.IngressClass")
+	proto.RegisterType((*IngressClassList)(nil), "k8s.io.api.networking.v1beta1.IngressClassList")
+	proto.RegisterType((*IngressClassSpec)(nil), "k8s.io.api.networking.v1beta1.IngressClassSpec")
 	proto.RegisterType((*IngressList)(nil), "k8s.io.api.networking.v1beta1.IngressList")
 	proto.RegisterType((*IngressRule)(nil), "k8s.io.api.networking.v1beta1.IngressRule")
 	proto.RegisterType((*IngressRuleValue)(nil), "k8s.io.api.networking.v1beta1.IngressRuleValue")
@@ -341,58 +429,69 @@ func init() {
 }
 
 var fileDescriptor_5bea11de0ceb8f53 = []byte{
-	// 812 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0xcf, 0x6e, 0xfb, 0x44,
-	0x10, 0x8e, 0xf3, 0xa7, 0x69, 0xd7, 0xfd, 0xa7, 0xa5, 0x87, 0xa8, 0x12, 0x6e, 0xe4, 0x03, 0x2a,
-	0x88, 0xae, 0x69, 0x0a, 0x88, 0xb3, 0x0f, 0xa8, 0x15, 0x81, 0x86, 0x75, 0x84, 0x10, 0xe2, 0xd0,
-	0x8d, 0xb3, 0x38, 0x26, 0x89, 0x6d, 0x76, 0xd7, 0x41, 0xdc, 0x78, 0x01, 0x04, 0x4f, 0xc1, 0x99,
-	0x23, 0x8f, 0xd0, 0x63, 0x8f, 0x3d, 0x55, 0x34, 0xbc, 0x07, 0x42, 0xbb, 0xde, 0xda, 0x4e, 0xd2,
-	0xfe, 0x6a, 0xfd, 0x6e, 0xde, 0x9d, 0xf9, 0xbe, 0xd9, 0x99, 0xf9, 0x66, 0x0c, 0x3e, 0x9f, 0x7e,
-	0xc6, 0x51, 0x18, 0x3b, 0xd3, 0x74, 0x44, 0x59, 0x44, 0x05, 0xe5, 0xce, 0x82, 0x46, 0xe3, 0x98,
-	0x39, 0xda, 0x40, 0x92, 0xd0, 0x89, 0xa8, 0xf8, 0x39, 0x66, 0xd3, 0x30, 0x0a, 0x9c, 0xc5, 0xf9,
-	0x88, 0x0a, 0x72, 0xee, 0x04, 0x34, 0xa2, 0x8c, 0x08, 0x3a, 0x46, 0x09, 0x8b, 0x45, 0x0c, 0xdf,
-	0xcd, 0xdc, 0x11, 0x49, 0x42, 0x54, 0xb8, 0x23, 0xed, 0x7e, 0x7c, 0x16, 0x84, 0x62, 0x92, 0x8e,
-	0x90, 0x1f, 0xcf, 0x9d, 0x20, 0x0e, 0x62, 0x47, 0xa1, 0x46, 0xe9, 0x0f, 0xea, 0xa4, 0x0e, 0xea,
-	0x2b, 0x63, 0x3b, 0xb6, 0x4b, 0xc1, 0xfd, 0x98, 0x51, 0x67, 0xb1, 0x11, 0xf1, 0xf8, 0xe3, 0xc2,
-	0x67, 0x4e, 0xfc, 0x49, 0x18, 0x51, 0xf6, 0x8b, 0x93, 0x4c, 0x03, 0x79, 0xc1, 0x9d, 0x39, 0x15,
-	0xe4, 0x39, 0x94, 0xf3, 0x12, 0x8a, 0xa5, 0x91, 0x08, 0xe7, 0x74, 0x03, 0xf0, 0xe9, 0x6b, 0x00,
-	0xee, 0x4f, 0xe8, 0x9c, 0x6c, 0xe0, 0x2e, 0x5e, 0xc2, 0xa5, 0x22, 0x9c, 0x39, 0x61, 0x24, 0xb8,
-	0x60, 0xeb, 0x20, 0xfb, 0x37, 0x03, 0x1c, 0x5c, 0x0e, 0x87, 0x83, 0xab, 0x28, 0x60, 0x94, 0xf3,
-	0x01, 0x11, 0x13, 0xd8, 0x05, 0xcd, 0x84, 0x88, 0x49, 0xc7, 0xe8, 0x1a, 0xa7, 0x3b, 0xee, 0xee,
-	0xed, 0xc3, 0x49, 0x6d, 0xf9, 0x70, 0xd2, 0x94, 0x36, 0xac, 0x2c, 0xf0, 0x5b, 0xd0, 0x1e, 0x11,
-	0x7f, 0x4a, 0xa3, 0x71, 0xa7, 0xde, 0x35, 0x4e, 0xcd, 0xde, 0x19, 0x7a, 0x63, 0x37, 0x90, 0xa6,
-	0x77, 0x33, 0x90, 0x7b, 0xa0, 0x39, 0xdb, 0xfa, 0x02, 0x3f, 0xd1, 0xd9, 0x53, 0x70, 0x54, 0x7a,
-	0x0e, 0x4e, 0x67, 0xf4, 0x1b, 0x32, 0x4b, 0x29, 0xf4, 0x40, 0x4b, 0x46, 0xe6, 0x1d, 0xa3, 0xdb,
-	0x38, 0x35, 0x7b, 0xe8, 0x95, 0x78, 0x6b, 0x29, 0xb9, 0x7b, 0x3a, 0x60, 0x4b, 0x9e, 0x38, 0xce,
-	0xb8, 0xec, 0xdf, 0xeb, 0xa0, 0xad, 0xbd, 0xe0, 0x0d, 0xd8, 0x96, 0x1d, 0x1c, 0x13, 0x41, 0x54,
-	0xe2, 0x66, 0xef, 0xa3, 0x52, 0x8c, 0xbc, 0xa0, 0x28, 0x99, 0x06, 0xf2, 0x82, 0x23, 0xe9, 0x8d,
-	0x16, 0xe7, 0xe8, 0x7a, 0xf4, 0x23, 0xf5, 0xc5, 0x97, 0x54, 0x10, 0x17, 0xea, 0x28, 0xa0, 0xb8,
-	0xc3, 0x39, 0x2b, 0xec, 0x83, 0x26, 0x4f, 0xa8, 0xaf, 0x2b, 0xf6, 0x41, 0xb5, 0x8a, 0x79, 0x09,
-	0xf5, 0x8b, 0x16, 0xc8, 0x13, 0x56, 0x2c, 0x70, 0x08, 0xb6, 0xb8, 0x20, 0x22, 0xe5, 0x9d, 0x86,
-	0xe2, 0xfb, 0xb0, 0x22, 0x9f, 0xc2, 0xb8, 0xfb, 0x9a, 0x71, 0x2b, 0x3b, 0x63, 0xcd, 0x65, 0xff,
-	0x65, 0x80, 0xfd, 0xd5, 0x5e, 0xc1, 0x4f, 0x80, 0xc9, 0x29, 0x5b, 0x84, 0x3e, 0xfd, 0x8a, 0xcc,
-	0xa9, 0x16, 0xc5, 0x3b, 0x1a, 0x6f, 0x7a, 0x85, 0x09, 0x97, 0xfd, 0x60, 0x90, 0xc3, 0x06, 0x31,
-	0x13, 0x3a, 0xe9, 0x97, 0x4b, 0x2a, 0x35, 0x8a, 0x32, 0x8d, 0xa2, 0xab, 0x48, 0x5c, 0x33, 0x4f,
-	0xb0, 0x30, 0x0a, 0x36, 0x02, 0x49, 0x32, 0x5c, 0x66, 0xb6, 0xff, 0x36, 0x80, 0xa9, 0x9f, 0xdc,
-	0x0f, 0xb9, 0x80, 0xdf, 0x6f, 0x34, 0x12, 0x55, 0x6b, 0xa4, 0x44, 0xab, 0x36, 0x1e, 0xea, 0x98,
-	0xdb, 0x4f, 0x37, 0xa5, 0x26, 0x7e, 0x01, 0x5a, 0xa1, 0xa0, 0x73, 0xde, 0xa9, 0x2b, 0x1d, 0xbe,
-	0x57, 0x51, 0xf7, 0xb9, 0xfe, 0xae, 0x24, 0x18, 0x67, 0x1c, 0xf6, 0x9f, 0xc5, 0xd3, 0xa5, 0xd2,
-	0xe5, 0xe0, 0x4d, 0x62, 0x2e, 0xd6, 0x07, 0xef, 0x32, 0xe6, 0x02, 0x2b, 0x0b, 0x4c, 0xc1, 0x61,
-	0xb8, 0x36, 0x1a, 0xba, 0xb4, 0x4e, 0xb5, 0x97, 0xe4, 0x30, 0xb7, 0xa3, 0xe9, 0x0f, 0xd7, 0x2d,
-	0x78, 0x23, 0x84, 0x4d, 0xc1, 0x86, 0x17, 0xfc, 0x1a, 0x34, 0x27, 0x42, 0x24, 0xba, 0xc6, 0x17,
-	0xd5, 0x07, 0xb2, 0x78, 0xc2, 0xb6, 0xca, 0x6e, 0x38, 0x1c, 0x60, 0x45, 0x65, 0xff, 0x57, 0xd4,
-	0xc3, 0xcb, 0x34, 0x9e, 0xaf, 0x19, 0xe3, 0x6d, 0xd6, 0x8c, 0xf9, 0xdc, 0x8a, 0x81, 0x97, 0xa0,
-	0x21, 0x66, 0x4f, 0x0d, 0x7c, 0xbf, 0x1a, 0xe3, 0xb0, 0xef, 0xb9, 0xa6, 0x2e, 0x58, 0x63, 0xd8,
-	0xf7, 0xb0, 0xa4, 0x80, 0xd7, 0xa0, 0xc5, 0xd2, 0x19, 0x95, 0x23, 0xd8, 0xa8, 0x3e, 0xd2, 0x32,
-	0xff, 0x42, 0x10, 0xf2, 0xc4, 0x71, 0xc6, 0x63, 0xff, 0x04, 0xf6, 0x56, 0xe6, 0x14, 0xde, 0x80,
-	0xdd, 0x59, 0x4c, 0xc6, 0x2e, 0x99, 0x91, 0xc8, 0xa7, 0x4c, 0x97, 0x61, 0x45, 0x75, 0xf2, 0x6f,
-	0xa5, 0xe4, 0x5b, 0xf2, 0xd3, 0x53, 0x7e, 0xa4, 0x83, 0xec, 0x96, 0x6d, 0x78, 0x85, 0xd1, 0x26,
-	0x00, 0x14, 0x39, 0xc2, 0x13, 0xd0, 0x92, 0x3a, 0xcb, 0xd6, 0xec, 0x8e, 0xbb, 0x23, 0x5f, 0x28,
-	0xe5, 0xc7, 0x71, 0x76, 0x0f, 0x7b, 0x00, 0x70, 0xea, 0x33, 0x2a, 0xd4, 0x32, 0xa8, 0x2b, 0xa1,
-	0xe6, 0x6b, 0xcf, 0xcb, 0x2d, 0xb8, 0xe4, 0xe5, 0x9e, 0xdd, 0x3e, 0x5a, 0xb5, 0xbb, 0x47, 0xab,
-	0x76, 0xff, 0x68, 0xd5, 0x7e, 0x5d, 0x5a, 0xc6, 0xed, 0xd2, 0x32, 0xee, 0x96, 0x96, 0x71, 0xbf,
-	0xb4, 0x8c, 0x7f, 0x96, 0x96, 0xf1, 0xc7, 0xbf, 0x56, 0xed, 0xbb, 0xb6, 0x2e, 0xd3, 0xff, 0x01,
-	0x00, 0x00, 0xff, 0xff, 0xdb, 0x8a, 0xe4, 0xd8, 0x21, 0x08, 0x00, 0x00,
+	// 990 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4f, 0x6f, 0xe3, 0x44,
+	0x14, 0xaf, 0x93, 0x66, 0x9b, 0x4e, 0xb2, 0xdd, 0x6a, 0xe8, 0x21, 0xaa, 0x84, 0x5b, 0xf9, 0x80,
+	0xca, 0x9f, 0xda, 0x34, 0xbb, 0x20, 0x8e, 0xc8, 0x2b, 0xa1, 0x56, 0x04, 0x1a, 0x26, 0x16, 0x20,
+	0x04, 0xd2, 0x4e, 0x9c, 0xb7, 0x8e, 0x89, 0x63, 0x9b, 0x99, 0x71, 0xd0, 0xde, 0xb8, 0x72, 0x82,
+	0x2f, 0x01, 0x9f, 0x81, 0x23, 0x82, 0x4b, 0x8f, 0x7b, 0xdc, 0x53, 0x45, 0xc3, 0xb7, 0xe0, 0x84,
+	0x66, 0x3c, 0xb5, 0x9d, 0xa4, 0xa5, 0x59, 0x0e, 0x7b, 0x8a, 0x67, 0xde, 0x7b, 0xbf, 0x37, 0xef,
+	0xf7, 0x7e, 0x33, 0x2f, 0xe8, 0xa3, 0xc9, 0x07, 0xdc, 0x0e, 0x13, 0x67, 0x92, 0x0d, 0x81, 0xc5,
+	0x20, 0x80, 0x3b, 0x33, 0x88, 0x47, 0x09, 0x73, 0xb4, 0x81, 0xa6, 0xa1, 0x13, 0x83, 0xf8, 0x3e,
+	0x61, 0x93, 0x30, 0x0e, 0x9c, 0xd9, 0xc9, 0x10, 0x04, 0x3d, 0x71, 0x02, 0x88, 0x81, 0x51, 0x01,
+	0x23, 0x3b, 0x65, 0x89, 0x48, 0xf0, 0xeb, 0xb9, 0xbb, 0x4d, 0xd3, 0xd0, 0x2e, 0xdd, 0x6d, 0xed,
+	0xbe, 0x7f, 0x1c, 0x84, 0x62, 0x9c, 0x0d, 0x6d, 0x3f, 0x99, 0x3a, 0x41, 0x12, 0x24, 0x8e, 0x8a,
+	0x1a, 0x66, 0x4f, 0xd5, 0x4a, 0x2d, 0xd4, 0x57, 0x8e, 0xb6, 0x6f, 0x55, 0x92, 0xfb, 0x09, 0x03,
+	0x67, 0xb6, 0x92, 0x71, 0xff, 0x51, 0xe9, 0x33, 0xa5, 0xfe, 0x38, 0x8c, 0x81, 0x3d, 0x73, 0xd2,
+	0x49, 0x20, 0x37, 0xb8, 0x33, 0x05, 0x41, 0x6f, 0x8a, 0x72, 0x6e, 0x8b, 0x62, 0x59, 0x2c, 0xc2,
+	0x29, 0xac, 0x04, 0xbc, 0x7f, 0x57, 0x00, 0xf7, 0xc7, 0x30, 0xa5, 0x2b, 0x71, 0x0f, 0x6f, 0x8b,
+	0xcb, 0x44, 0x18, 0x39, 0x61, 0x2c, 0xb8, 0x60, 0xcb, 0x41, 0xd6, 0x9f, 0x06, 0x7a, 0x70, 0xea,
+	0x79, 0xfd, 0xb3, 0x38, 0x60, 0xc0, 0x79, 0x9f, 0x8a, 0x31, 0x3e, 0x44, 0x9b, 0x29, 0x15, 0xe3,
+	0x8e, 0x71, 0x68, 0x1c, 0x6d, 0xbb, 0xed, 0x8b, 0xcb, 0x83, 0x8d, 0xf9, 0xe5, 0xc1, 0xa6, 0xb4,
+	0x11, 0x65, 0xc1, 0x8f, 0x50, 0x53, 0xfe, 0x7a, 0xcf, 0x52, 0xe8, 0xd4, 0x95, 0x57, 0x67, 0x7e,
+	0x79, 0xd0, 0xec, 0xeb, 0xbd, 0x7f, 0x2a, 0xdf, 0xa4, 0xf0, 0xc4, 0x5f, 0xa2, 0xad, 0x21, 0xf5,
+	0x27, 0x10, 0x8f, 0x3a, 0xb5, 0x43, 0xe3, 0xa8, 0xd5, 0x3d, 0xb6, 0xff, 0xb3, 0x87, 0xb6, 0x3e,
+	0x94, 0x9b, 0x07, 0xb9, 0x0f, 0xf4, 0x49, 0xb6, 0xf4, 0x06, 0xb9, 0x86, 0xb3, 0x26, 0x68, 0xaf,
+	0x52, 0x04, 0xc9, 0x22, 0xf8, 0x9c, 0x46, 0x19, 0xe0, 0x01, 0x6a, 0xc8, 0xec, 0xbc, 0x63, 0x1c,
+	0xd6, 0x8f, 0x5a, 0x5d, 0xfb, 0x8e, 0x7c, 0x4b, 0x44, 0xb8, 0xf7, 0x75, 0xc2, 0x86, 0x5c, 0x71,
+	0x92, 0x63, 0x59, 0x3f, 0xd5, 0xd0, 0x96, 0xf6, 0xc2, 0x4f, 0x50, 0x53, 0xf6, 0x7d, 0x44, 0x05,
+	0x55, 0x74, 0xb5, 0xba, 0xef, 0x56, 0x72, 0x14, 0x6d, 0xb0, 0xd3, 0x49, 0x20, 0x37, 0xb8, 0x2d,
+	0xbd, 0xed, 0xd9, 0x89, 0x7d, 0x3e, 0xfc, 0x16, 0x7c, 0xf1, 0x09, 0x08, 0xea, 0x62, 0x9d, 0x05,
+	0x95, 0x7b, 0xa4, 0x40, 0xc5, 0x3d, 0xb4, 0xc9, 0x53, 0xf0, 0x35, 0x63, 0x6f, 0xad, 0xc7, 0xd8,
+	0x20, 0x05, 0xbf, 0x6c, 0x9c, 0x5c, 0x11, 0x85, 0x82, 0x3d, 0x74, 0x8f, 0x0b, 0x2a, 0x32, 0xae,
+	0xda, 0xd6, 0xea, 0xbe, 0xb3, 0x26, 0x9e, 0x8a, 0x71, 0x77, 0x34, 0xe2, 0xbd, 0x7c, 0x4d, 0x34,
+	0x96, 0xf5, 0x63, 0x0d, 0xed, 0x2c, 0xf6, 0x0a, 0xbf, 0x87, 0x5a, 0x1c, 0xd8, 0x2c, 0xf4, 0xe1,
+	0x53, 0x3a, 0x05, 0x2d, 0xa5, 0xd7, 0x74, 0x7c, 0x6b, 0x50, 0x9a, 0x48, 0xd5, 0x0f, 0x07, 0x45,
+	0x58, 0x3f, 0x61, 0x42, 0x17, 0x7d, 0x3b, 0xa5, 0x52, 0xd9, 0x76, 0xae, 0x6c, 0xfb, 0x2c, 0x16,
+	0xe7, 0x6c, 0x20, 0x58, 0x18, 0x07, 0x2b, 0x89, 0x24, 0x18, 0xa9, 0x22, 0xe3, 0x2f, 0x50, 0x93,
+	0x01, 0x4f, 0x32, 0xe6, 0x83, 0xa6, 0x62, 0x41, 0x8c, 0xf2, 0x09, 0x90, 0x6d, 0x92, 0xba, 0x1d,
+	0xf5, 0x12, 0x9f, 0x46, 0x79, 0x73, 0x08, 0x3c, 0x05, 0x06, 0xb1, 0x0f, 0x6e, 0x5b, 0x0a, 0x9e,
+	0x68, 0x08, 0x52, 0x80, 0xc9, 0x0b, 0xd5, 0xd6, 0x5c, 0x3c, 0x8e, 0xe8, 0x2b, 0x91, 0xc8, 0x67,
+	0x0b, 0x12, 0x71, 0xd6, 0x6b, 0xa9, 0x3a, 0xdc, 0x6d, 0x3a, 0xb1, 0xfe, 0x30, 0xd0, 0x6e, 0xd5,
+	0xb1, 0x17, 0x72, 0x81, 0xbf, 0x5e, 0xa9, 0xc4, 0x5e, 0xaf, 0x12, 0x19, 0xad, 0xea, 0xd8, 0xd5,
+	0xa9, 0x9a, 0xd7, 0x3b, 0x95, 0x2a, 0xfa, 0xa8, 0x11, 0x0a, 0x98, 0xf2, 0x4e, 0x4d, 0xdd, 0xd5,
+	0xb7, 0x5f, 0xa2, 0x8c, 0xf2, 0xa2, 0x9e, 0x49, 0x04, 0x92, 0x03, 0x59, 0xbf, 0x2c, 0x15, 0x21,
+	0xeb, 0xc3, 0x5d, 0x84, 0xfc, 0x24, 0x16, 0x2c, 0x89, 0x22, 0x60, 0x5a, 0x97, 0x05, 0xbd, 0x8f,
+	0x0b, 0x0b, 0xa9, 0x78, 0xe1, 0x6f, 0x10, 0x4a, 0x29, 0xa3, 0x53, 0x10, 0xc0, 0xf8, 0x4d, 0x6f,
+	0xd7, 0xdd, 0x72, 0xd9, 0x91, 0xf0, 0xfd, 0x02, 0x84, 0x54, 0x00, 0xad, 0xdf, 0x0c, 0xd4, 0xd2,
+	0xe7, 0x7c, 0x05, 0x3c, 0x7f, 0xbc, 0xc8, 0xf3, 0x1b, 0x6b, 0xbe, 0xc1, 0x37, 0x53, 0xfc, 0x6b,
+	0x79, 0x74, 0xf9, 0xea, 0xca, 0xd1, 0x31, 0x4e, 0xb8, 0x58, 0x1e, 0x1d, 0xa7, 0x09, 0x17, 0x44,
+	0x59, 0x70, 0x86, 0x76, 0xc3, 0xa5, 0x67, 0xfa, 0xe5, 0x84, 0x5b, 0x84, 0xb9, 0x1d, 0x0d, 0xbf,
+	0xbb, 0x6c, 0x21, 0x2b, 0x29, 0x2c, 0x40, 0x2b, 0x5e, 0xf2, 0xde, 0x8c, 0x85, 0x48, 0x35, 0xc7,
+	0x0f, 0xd7, 0x1f, 0x0e, 0xe5, 0x11, 0x9a, 0xaa, 0x3a, 0xcf, 0xeb, 0x13, 0x05, 0x65, 0xfd, 0x5e,
+	0x2b, 0xf8, 0x50, 0x6a, 0xfb, 0xb0, 0xa8, 0x56, 0x29, 0x50, 0xbd, 0x85, 0x9b, 0x8a, 0x9b, 0xbd,
+	0xca, 0xc1, 0x0b, 0x1b, 0x59, 0xf1, 0xc6, 0x5e, 0x39, 0x34, 0x8d, 0xff, 0x33, 0x34, 0x5b, 0x37,
+	0x0d, 0x4c, 0x7c, 0x8a, 0xea, 0x22, 0xba, 0x96, 0xc0, 0x9b, 0xeb, 0x21, 0x7a, 0xbd, 0x81, 0xdb,
+	0xd2, 0x94, 0xd7, 0xbd, 0xde, 0x80, 0x48, 0x08, 0x7c, 0x8e, 0x1a, 0x2c, 0x8b, 0x40, 0x0e, 0x94,
+	0xfa, 0xfa, 0x03, 0x4a, 0x32, 0x58, 0x4a, 0x4a, 0xae, 0x38, 0xc9, 0x71, 0xac, 0xef, 0xd0, 0xfd,
+	0x85, 0xa9, 0x83, 0x9f, 0xa0, 0x76, 0x94, 0xd0, 0x91, 0x4b, 0x23, 0x1a, 0xfb, 0xfa, 0xce, 0x2e,
+	0xe9, 0xf6, 0xfa, 0xfe, 0xf5, 0x2a, 0x7e, 0x7a, 0x66, 0xed, 0xe9, 0x24, 0xed, 0xaa, 0x8d, 0x2c,
+	0x20, 0x5a, 0x14, 0xa1, 0xb2, 0x46, 0x7c, 0x80, 0x1a, 0x52, 0xa9, 0xf9, 0x9f, 0x86, 0x6d, 0x77,
+	0x5b, 0x9e, 0x50, 0x0a, 0x98, 0x93, 0x7c, 0x5f, 0x3e, 0x21, 0x1c, 0x7c, 0x06, 0x42, 0xb5, 0xb3,
+	0xb6, 0xf8, 0x84, 0x0c, 0x0a, 0x0b, 0xa9, 0x78, 0xb9, 0xc7, 0x17, 0x57, 0xe6, 0xc6, 0xf3, 0x2b,
+	0x73, 0xe3, 0xc5, 0x95, 0xb9, 0xf1, 0xc3, 0xdc, 0x34, 0x2e, 0xe6, 0xa6, 0xf1, 0x7c, 0x6e, 0x1a,
+	0x2f, 0xe6, 0xa6, 0xf1, 0xd7, 0xdc, 0x34, 0x7e, 0xfe, 0xdb, 0xdc, 0xf8, 0x6a, 0x4b, 0xd3, 0xf4,
+	0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x6e, 0x54, 0x4d, 0x9d, 0x25, 0x0b, 0x00, 0x00,
 }
 
 func (m *HTTPIngressPath) Marshal() (dAtA []byte, err error) {
@@ -415,6 +514,13 @@ func (m *HTTPIngressPath) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	_ = i
 	var l int
 	_ = l
+	if m.PathType != nil {
+		i -= len(*m.PathType)
+		copy(dAtA[i:], *m.PathType)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PathType)))
+		i--
+		dAtA[i] = 0x1a
+	}
 	{
 		size, err := m.Backend.MarshalToSizedBuffer(dAtA[:i])
 		if err != nil {
@@ -543,6 +649,18 @@ func (m *IngressBackend) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	_ = i
 	var l int
 	_ = l
+	if m.Resource != nil {
+		{
+			size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
 	{
 		size, err := m.ServicePort.MarshalToSizedBuffer(dAtA[:i])
 		if err != nil {
@@ -561,6 +679,136 @@ func (m *IngressBackend) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	return len(dAtA) - i, nil
 }
 
+func (m *IngressClass) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *IngressClass) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *IngressClass) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *IngressClassList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *IngressClassList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *IngressClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *IngressClassSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *IngressClassSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *IngressClassSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Parameters != nil {
+		{
+			size, err := m.Parameters.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	i -= len(m.Controller)
+	copy(dAtA[i:], m.Controller)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Controller)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
 func (m *IngressList) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
@@ -701,6 +949,13 @@ func (m *IngressSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 	_ = i
 	var l int
 	_ = l
+	if m.IngressClassName != nil {
+		i -= len(*m.IngressClassName)
+		copy(dAtA[i:], *m.IngressClassName)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.IngressClassName)))
+		i--
+		dAtA[i] = 0x22
+	}
 	if len(m.Rules) > 0 {
 		for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- {
 			{
@@ -835,6 +1090,10 @@ func (m *HTTPIngressPath) Size() (n int) {
 	n += 1 + l + sovGenerated(uint64(l))
 	l = m.Backend.Size()
 	n += 1 + l + sovGenerated(uint64(l))
+	if m.PathType != nil {
+		l = len(*m.PathType)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
 	return n
 }
 
@@ -878,6 +1137,55 @@ func (m *IngressBackend) Size() (n int) {
 	n += 1 + l + sovGenerated(uint64(l))
 	l = m.ServicePort.Size()
 	n += 1 + l + sovGenerated(uint64(l))
+	if m.Resource != nil {
+		l = m.Resource.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *IngressClass) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Spec.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *IngressClassList) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *IngressClassSpec) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Controller)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.Parameters != nil {
+		l = m.Parameters.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
 	return n
 }
 
@@ -946,6 +1254,10 @@ func (m *IngressSpec) Size() (n int) {
 			n += 1 + l + sovGenerated(uint64(l))
 		}
 	}
+	if m.IngressClassName != nil {
+		l = len(*m.IngressClassName)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
 	return n
 }
 
@@ -990,6 +1302,7 @@ func (this *HTTPIngressPath) String() string {
 	s := strings.Join([]string{`&HTTPIngressPath{`,
 		`Path:` + fmt.Sprintf("%v", this.Path) + `,`,
 		`Backend:` + strings.Replace(strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1), `&`, ``, 1) + `,`,
+		`PathType:` + valueToStringGenerated(this.PathType) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1028,6 +1341,45 @@ func (this *IngressBackend) String() string {
 	s := strings.Join([]string{`&IngressBackend{`,
 		`ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`,
 		`ServicePort:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ServicePort), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`,
+		`Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "TypedLocalObjectReference", "v11.TypedLocalObjectReference", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *IngressClass) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&IngressClass{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IngressClassSpec", "IngressClassSpec", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *IngressClassList) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]IngressClass{"
+	for _, f := range this.Items {
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "IngressClass", "IngressClass", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&IngressClassList{`,
+		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+		`Items:` + repeatedStringForItems + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *IngressClassSpec) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&IngressClassSpec{`,
+		`Controller:` + fmt.Sprintf("%v", this.Controller) + `,`,
+		`Parameters:` + strings.Replace(fmt.Sprintf("%v", this.Parameters), "TypedLocalObjectReference", "v11.TypedLocalObjectReference", 1) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1087,6 +1439,7 @@ func (this *IngressSpec) String() string {
 		`Backend:` + strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1) + `,`,
 		`TLS:` + repeatedStringForTLS + `,`,
 		`Rules:` + repeatedStringForRules + `,`,
+		`IngressClassName:` + valueToStringGenerated(this.IngressClassName) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1214,6 +1567,39 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error {
 				return err
 			}
 			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PathType", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := PathType(dAtA[iNdEx:postIndex])
+			m.PathType = &s
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -1571,6 +1957,402 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error {
 				return err
 			}
 			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Resource == nil {
+				m.Resource = &v11.TypedLocalObjectReference{}
+			}
+			if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *IngressClass) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: IngressClass: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: IngressClass: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *IngressClassList) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: IngressClassList: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: IngressClassList: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Items = append(m.Items, IngressClass{})
+			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *IngressClassSpec) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: IngressClassSpec: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: IngressClassSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Controller = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Parameters == nil {
+				m.Parameters = &v11.TypedLocalObjectReference{}
+			}
+			if err := m.Parameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -2055,6 +2837,39 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error {
 				return err
 			}
 			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field IngressClassName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			s := string(dAtA[iNdEx:postIndex])
+			m.IngressClassName = &s
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -2285,6 +3100,7 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error {
 func skipGenerated(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
+	depth := 0
 	for iNdEx < l {
 		var wire uint64
 		for shift := uint(0); ; shift += 7 {
@@ -2316,10 +3132,8 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			return iNdEx, nil
 		case 1:
 			iNdEx += 8
-			return iNdEx, nil
 		case 2:
 			var length int
 			for shift := uint(0); ; shift += 7 {
@@ -2340,55 +3154,30 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 				return 0, ErrInvalidLengthGenerated
 			}
 			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthGenerated
-			}
-			return iNdEx, nil
 		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowGenerated
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipGenerated(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthGenerated
-				}
-			}
-			return iNdEx, nil
+			depth++
 		case 4:
-			return iNdEx, nil
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
 		case 5:
 			iNdEx += 4
-			return iNdEx, nil
 		default:
 			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
 		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
 	}
-	panic("unreachable")
+	return 0, io.ErrUnexpectedEOF
 }
 
 var (
-	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
 )
diff --git a/vendor/k8s.io/api/networking/v1beta1/generated.proto b/vendor/k8s.io/api/networking/v1beta1/generated.proto
index a72545c8..68bede81 100644
--- a/vendor/k8s.io/api/networking/v1beta1/generated.proto
+++ b/vendor/k8s.io/api/networking/v1beta1/generated.proto
@@ -30,19 +30,33 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto";
 // Package-wide variables from generator "generated".
 option go_package = "v1beta1";
 
-// HTTPIngressPath associates a path regex with a backend. Incoming urls matching
-// the path are forwarded to the backend.
+// HTTPIngressPath associates a path with a backend. Incoming urls matching the
+// path are forwarded to the backend.
 message HTTPIngressPath {
-  // Path is an extended POSIX regex as defined by IEEE Std 1003.1,
-  // (i.e this follows the egrep/unix syntax, not the perl syntax)
-  // matched against the path of an incoming request. Currently it can
-  // contain characters disallowed from the conventional "path"
-  // part of a URL as defined by RFC 3986. Paths must begin with
-  // a '/'. If unspecified, the path defaults to a catch all sending
-  // traffic to the backend.
+  // Path is matched against the path of an incoming request. Currently it can
+  // contain characters disallowed from the conventional "path" part of a URL
+  // as defined by RFC 3986. Paths must begin with a '/'. When unspecified,
+  // all paths from incoming requests are matched.
   // +optional
   optional string path = 1;
 
+  // PathType determines the interpretation of the Path matching. PathType can
+  // be one of the following values:
+  // * Exact: Matches the URL path exactly.
+  // * Prefix: Matches based on a URL path prefix split by '/'. Matching is
+  //   done on a path element by element basis. A path element refers to the
+  //   list of labels in the path split by the '/' separator. A request is a
+  //   match for path p if p is an element-wise prefix of the request path.
+  //   Note that if the last element of the path is a substring of the last
+  //   element in the request path, it is not a match (e.g. /foo/bar
+  //   matches /foo/bar/baz, but does not match /foo/barbaz).
+  // * ImplementationSpecific: Interpretation of the Path matching is up to
+  //   the IngressClass. Implementations can treat this as a separate PathType
+  //   or treat it identically to Prefix or Exact path types.
+  // Implementations are required to support all path types.
+  // Defaults to ImplementationSpecific.
+  optional string pathType = 3;
+
   // Backend defines the referenced service endpoint to which the traffic
   // will be forwarded to.
   optional IngressBackend backend = 2;
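The Prefix semantics described above are easy to misread, so here is a small hypothetical helper (not part of this patch) that follows the documented element-by-element comparison, including the /foo/bar vs /foo/barbaz case:

```go
package main

import (
	"fmt"
	"strings"
)

// prefixMatch reports whether requestPath matches rulePath under the
// element-wise Prefix semantics documented for pathType: every element of
// the rule must equal the corresponding element of the request path.
func prefixMatch(rulePath, requestPath string) bool {
	rule := strings.Split(strings.Trim(rulePath, "/"), "/")
	req := strings.Split(strings.Trim(requestPath, "/"), "/")
	if len(rule) > len(req) {
		return false
	}
	for i, el := range rule {
		if req[i] != el {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(prefixMatch("/foo/bar", "/foo/bar/baz")) // true
	fmt.Println(prefixMatch("/foo/bar", "/foo/barbaz"))  // false
}
```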
@@ -82,10 +96,63 @@ message Ingress {
 // IngressBackend describes all endpoints for a given service and port.
 message IngressBackend {
   // Specifies the name of the referenced service.
+  // +optional
   optional string serviceName = 1;
 
   // Specifies the port of the referenced service.
+  // +optional
   optional k8s.io.apimachinery.pkg.util.intstr.IntOrString servicePort = 2;
+
+  // Resource is an ObjectRef to another Kubernetes resource in the namespace
+  // of the Ingress object. If resource is specified, serviceName and servicePort
+  // must not be specified.
+  // +optional
+  optional k8s.io.api.core.v1.TypedLocalObjectReference resource = 3;
+}
+
+// IngressClass represents the class of the Ingress, referenced by the Ingress
+// Spec. The `ingressclass.kubernetes.io/is-default-class` annotation can be
+// used to indicate that an IngressClass should be considered default. When a
+// single IngressClass resource has this annotation set to true, new Ingress
+// resources without a class specified will be assigned this default class.
+message IngressClass {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec is the desired state of the IngressClass.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+  // +optional
+  optional IngressClassSpec spec = 2;
+}
+
+// IngressClassList is a collection of IngressClasses.
+message IngressClassList {
+  // Standard list metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is the list of IngressClasses.
+  // +listType=set
+  repeated IngressClass items = 2;
+}
+
+// IngressClassSpec provides information about the class of an Ingress.
+message IngressClassSpec {
+  // Controller refers to the name of the controller that should handle this
+  // class. This allows for different "flavors" that are controlled by the
+  // same controller. For example, you may have different Parameters for the
+  // same implementing controller. This should be specified as a
+  // domain-prefixed path no more than 250 characters in length, e.g.
+  // "acme.io/ingress-controller". This field is immutable.
+  optional string controller = 1;
+
+  // Parameters is a link to a custom resource containing additional
+  // configuration for the controller. This is optional if the controller does
+  // not require extra parameters.
+  // +optional
+  optional k8s.io.api.core.v1.TypedLocalObjectReference parameters = 2;
 }
 
 // IngressList is a collection of Ingress.
@@ -103,18 +170,28 @@ message IngressList {
 // the related backend services. Incoming requests are first evaluated for a host
 // match, then routed to the backend associated with the matching IngressRuleValue.
 message IngressRule {
-  // Host is the fully qualified domain name of a network host, as defined
-  // by RFC 3986. Note the following deviations from the "host" part of the
-  // URI as defined in the RFC:
-  // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the
-  // 	  IP in the Spec of the parent Ingress.
+  // Host is the fully qualified domain name of a network host, as defined by RFC 3986.
+  // Note the following deviations from the "host" part of the
+  // URI as defined in RFC 3986:
+  // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to
+  //    the IP in the Spec of the parent Ingress.
   // 2. The `:` delimiter is not respected because ports are not allowed.
   // 	  Currently the port of an Ingress is implicitly :80 for http and
   // 	  :443 for https.
   // Both these may change in the future.
-  // Incoming requests are matched against the host before the IngressRuleValue.
-  // If the host is unspecified, the Ingress routes all traffic based on the
-  // specified IngressRuleValue.
+  // Incoming requests are matched against the host before the
+  // IngressRuleValue. If the host is unspecified, the Ingress routes all
+  // traffic based on the specified IngressRuleValue.
+  //
+  // Host can be "precise" which is a domain name without the terminating dot of
+  // a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name
+  // prefixed with a single wildcard label (e.g. "*.foo.com").
+  // The wildcard character '*' must appear by itself as the first DNS label and
+  // matches only a single label. You cannot have a wildcard label by itself (e.g. Host == "*").
+  // Requests will be matched against the Host field in the following way:
+  // 1. If Host is precise, the request matches this rule if the http host header is equal to Host.
+  // 2. If Host is a wildcard, then the request matches this rule if the http host header
+  // is equal to the suffix (removing the first label) of the wildcard rule.
   // +optional
   optional string host = 1;
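The precise-versus-wildcard host rules above can be captured in a short hypothetical helper (illustrative only, not code from this patch); note that the wildcard consumes exactly one leading label:

```go
package main

import (
	"fmt"
	"strings"
)

// hostMatches follows the two cases documented for IngressRule.Host: a
// precise host must equal the request host exactly; a wildcard host matches
// when the request host is one extra label followed by the wildcard suffix.
func hostMatches(ruleHost, reqHost string) bool {
	if !strings.HasPrefix(ruleHost, "*.") {
		return ruleHost == reqHost
	}
	suffix := strings.TrimPrefix(ruleHost, "*")
	i := strings.Index(reqHost, ".")
	if i <= 0 {
		return false
	}
	return reqHost[i:] == suffix
}

func main() {
	fmt.Println(hostMatches("*.foo.com", "bar.foo.com"))     // true
	fmt.Println(hostMatches("*.foo.com", "baz.bar.foo.com")) // false
	fmt.Println(hostMatches("foo.bar.com", "foo.bar.com"))   // true
}
```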
 
@@ -138,6 +215,19 @@ message IngressRuleValue {
 
 // IngressSpec describes the Ingress the user wishes to exist.
 message IngressSpec {
+  // IngressClassName is the name of the IngressClass cluster resource. The
+  // associated IngressClass defines which controller will implement the
+  // resource. This replaces the deprecated `kubernetes.io/ingress.class`
+  // annotation. For backwards compatibility, when that annotation is set, it
+  // must be given precedence over this field. The controller may emit a
+  // warning if the field and annotation have different values.
+  // Implementations of this API should ignore Ingresses without a class
+  // specified. An IngressClass resource may be marked as default, which can
+  // be used to set a default value for this field. For more information,
+  // refer to the IngressClass documentation.
+  // +optional
+  optional string ingressClassName = 4;
+
   // A default backend capable of servicing requests that don't match any
   // rule. At least one of 'backend' or 'rules' must be specified. This field
   // is optional to allow the loadbalancer controller or defaulting logic to
@@ -175,11 +265,11 @@ message IngressTLS {
   // +optional
   repeated string hosts = 1;
 
-  // SecretName is the name of the secret used to terminate SSL traffic on 443.
-  // Field is left optional to allow SSL routing based on SNI hostname alone.
-  // If the SNI host in a listener conflicts with the "Host" header field used
-  // by an IngressRule, the SNI host is used for termination and value of the
-  // Host header is used for routing.
+  // SecretName is the name of the secret used to terminate TLS traffic on
+  // port 443. Field is left optional to allow TLS routing based on SNI
+  // hostname alone. If the SNI host in a listener conflicts with the "Host"
+  // header field used by an IngressRule, the SNI host is used for termination
+  // and the value of the Host header is used for routing.
   // +optional
   optional string secretName = 2;
 }
diff --git a/vendor/k8s.io/api/networking/v1beta1/register.go b/vendor/k8s.io/api/networking/v1beta1/register.go
index c046c490..04234953 100644
--- a/vendor/k8s.io/api/networking/v1beta1/register.go
+++ b/vendor/k8s.io/api/networking/v1beta1/register.go
@@ -49,6 +49,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
 	scheme.AddKnownTypes(SchemeGroupVersion,
 		&Ingress{},
 		&IngressList{},
+		&IngressClass{},
+		&IngressClassList{},
 	)
 	// Add the watch version that applies
 	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
diff --git a/vendor/k8s.io/api/networking/v1beta1/types.go b/vendor/k8s.io/api/networking/v1beta1/types.go
index 37277bf8..46f530bf 100644
--- a/vendor/k8s.io/api/networking/v1beta1/types.go
+++ b/vendor/k8s.io/api/networking/v1beta1/types.go
@@ -63,6 +63,19 @@ type IngressList struct {
 
 // IngressSpec describes the Ingress the user wishes to exist.
 type IngressSpec struct {
+	// IngressClassName is the name of the IngressClass cluster resource. The
+	// associated IngressClass defines which controller will implement the
+	// resource. This replaces the deprecated `kubernetes.io/ingress.class`
+	// annotation. For backwards compatibility, when that annotation is set, it
+	// must be given precedence over this field. The controller may emit a
+	// warning if the field and annotation have different values.
+	// Implementations of this API should ignore Ingresses without a class
+	// specified. An IngressClass resource may be marked as default, which can
+	// be used to set a default value for this field. For more information,
+	// refer to the IngressClass documentation.
+	// +optional
+	IngressClassName *string `json:"ingressClassName,omitempty" protobuf:"bytes,4,opt,name=ingressClassName"`
+
 	// A default backend capable of servicing requests that don't match any
 	// rule. At least one of 'backend' or 'rules' must be specified. This field
 	// is optional to allow the loadbalancer controller or defaulting logic to
@@ -93,11 +106,11 @@ type IngressTLS struct {
 	// Ingress, if left unspecified.
 	// +optional
 	Hosts []string `json:"hosts,omitempty" protobuf:"bytes,1,rep,name=hosts"`
-	// SecretName is the name of the secret used to terminate SSL traffic on 443.
-	// Field is left optional to allow SSL routing based on SNI hostname alone.
-	// If the SNI host in a listener conflicts with the "Host" header field used
-	// by an IngressRule, the SNI host is used for termination and value of the
-	// Host header is used for routing.
+	// SecretName is the name of the secret used to terminate TLS traffic on
+	// port 443. Field is left optional to allow TLS routing based on SNI
+	// hostname alone. If the SNI host in a listener conflicts with the "Host"
+	// header field used by an IngressRule, the SNI host is used for termination
+	// and the value of the Host header is used for routing.
 	// +optional
 	SecretName string `json:"secretName,omitempty" protobuf:"bytes,2,opt,name=secretName"`
 	// TODO: Consider specifying different modes of termination, protocols etc.
@@ -114,18 +127,28 @@ type IngressStatus struct {
 // the related backend services. Incoming requests are first evaluated for a host
 // match, then routed to the backend associated with the matching IngressRuleValue.
 type IngressRule struct {
-	// Host is the fully qualified domain name of a network host, as defined
-	// by RFC 3986. Note the following deviations from the "host" part of the
-	// URI as defined in the RFC:
-	// 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the
-	//	  IP in the Spec of the parent Ingress.
+	// Host is the fully qualified domain name of a network host, as defined by RFC 3986.
+	// Note the following deviations from the "host" part of the
+	// URI as defined in RFC 3986:
+	// 1. IPs are not allowed. Currently an IngressRuleValue can only apply to
+	//    the IP in the Spec of the parent Ingress.
 	// 2. The `:` delimiter is not respected because ports are not allowed.
 	//	  Currently the port of an Ingress is implicitly :80 for http and
 	//	  :443 for https.
 	// Both these may change in the future.
-	// Incoming requests are matched against the host before the IngressRuleValue.
-	// If the host is unspecified, the Ingress routes all traffic based on the
-	// specified IngressRuleValue.
+	// Incoming requests are matched against the host before the
+	// IngressRuleValue. If the host is unspecified, the Ingress routes all
+	// traffic based on the specified IngressRuleValue.
+	//
+	// Host can be "precise" which is a domain name without the terminating dot of
+	// a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name
+	// prefixed with a single wildcard label (e.g. "*.foo.com").
+	// The wildcard character '*' must appear by itself as the first DNS label and
+	// matches only a single label. You cannot have a wildcard label by itself (e.g. Host == "*").
+	// Requests will be matched against the Host field in the following way:
+	// 1. If Host is precise, the request matches this rule if the http host header is equal to Host.
+	// 2. If Host is a wildcard, then the request matches this rule if the http host header
+	// is equal to the suffix (removing the first label) of the wildcard rule.
 	// +optional
 	Host string `json:"host,omitempty" protobuf:"bytes,1,opt,name=host"`
 	// IngressRuleValue represents a rule to route requests for this IngressRule.
@@ -164,19 +187,63 @@ type HTTPIngressRuleValue struct {
 	// options usable by a loadbalancer, like http keep-alive.
 }
 
-// HTTPIngressPath associates a path regex with a backend. Incoming urls matching
-// the path are forwarded to the backend.
+// PathType represents the type of path referred to by a HTTPIngressPath.
+type PathType string
+
+const (
+	// PathTypeExact matches the URL path exactly and with case sensitivity.
+	PathTypeExact = PathType("Exact")
+
+	// PathTypePrefix matches based on a URL path prefix split by '/'. Matching
+	// is case sensitive and done on a path element by element basis. A path
+	// element refers to the list of labels in the path split by the '/'
+	// separator. A request is a match for path p if p is an element-wise
+	// prefix of the request path. Note that if the last element of the
+	// path is a substring of the last element in request path, it is not a
+	// match (e.g. /foo/bar matches /foo/bar/baz, but does not match
+	// /foo/barbaz). If multiple matching paths exist in an Ingress spec, the
+	// longest matching path is given priority.
+	// Examples:
+	// - /foo/bar does not match requests to /foo/barbaz
+	// - /foo/bar matches requests to /foo/bar and /foo/bar/baz
+	// - /foo and /foo/ both match requests to /foo and /foo/. If both paths are
+	//   present in an Ingress spec, the longest matching path (/foo/) is given
+	//   priority.
+	PathTypePrefix = PathType("Prefix")
+
+	// PathTypeImplementationSpecific matching is up to the IngressClass.
+	// Implementations can treat this as a separate PathType or treat it
+	// identically to Prefix or Exact path types.
+	PathTypeImplementationSpecific = PathType("ImplementationSpecific")
+)
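A minimal sketch of the element-wise prefix matching that the PathTypePrefix comment describes. The function below is illustrative only (it is not part of this patch) and assumes '/'-separated paths with simple trimming of leading and trailing slashes.

package main

import (
	"fmt"
	"strings"
)

// prefixPathMatches illustrates PathTypePrefix: every element of the rule
// path must equal the corresponding element of the request path, so
// /foo/bar matches /foo/bar/baz but not /foo/barbaz.
func prefixPathMatches(rulePath, requestPath string) bool {
	rule := strings.Split(strings.Trim(rulePath, "/"), "/")
	req := strings.Split(strings.Trim(requestPath, "/"), "/")
	if len(rule) > len(req) {
		return false
	}
	for i := range rule {
		if rule[i] != req[i] {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(prefixPathMatches("/foo/bar", "/foo/bar/baz")) // true
	fmt.Println(prefixPathMatches("/foo/bar", "/foo/barbaz"))  // false: "barbaz" is not the element "bar"
	fmt.Println(prefixPathMatches("/foo/", "/foo"))            // true: trailing slash trimmed in this sketch
}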
+
+// HTTPIngressPath associates a path with a backend. Incoming urls matching the
+// path are forwarded to the backend.
 type HTTPIngressPath struct {
-	// Path is an extended POSIX regex as defined by IEEE Std 1003.1,
-	// (i.e this follows the egrep/unix syntax, not the perl syntax)
-	// matched against the path of an incoming request. Currently it can
-	// contain characters disallowed from the conventional "path"
-	// part of a URL as defined by RFC 3986. Paths must begin with
-	// a '/'. If unspecified, the path defaults to a catch all sending
-	// traffic to the backend.
+	// Path is matched against the path of an incoming request. Currently it can
+	// contain characters disallowed from the conventional "path" part of a URL
+	// as defined by RFC 3986. Paths must begin with a '/'. When unspecified,
+	// all paths from incoming requests are matched.
 	// +optional
 	Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
 
+	// PathType determines the interpretation of the Path matching. PathType can
+	// be one of the following values:
+	// * Exact: Matches the URL path exactly.
+	// * Prefix: Matches based on a URL path prefix split by '/'. Matching is
+	//   done on a path element by element basis. A path element refers to the
+	//   list of labels in the path split by the '/' separator. A request is a
+	//   match for path p if p is an element-wise prefix of the
+	//   request path. Note that if the last element of the path is a substring
+	//   of the last element in request path, it is not a match (e.g. /foo/bar
+	//   matches /foo/bar/baz, but does not match /foo/barbaz).
+	// * ImplementationSpecific: Interpretation of the Path matching is up to
+	//   the IngressClass. Implementations can treat this as a separate PathType
+	//   or treat it identically to Prefix or Exact path types.
+	// Implementations are required to support all path types.
+	// Defaults to ImplementationSpecific.
+	PathType *PathType `json:"pathType,omitempty" protobuf:"bytes,3,opt,name=pathType"`
+
 	// Backend defines the referenced service endpoint to which the traffic
 	// will be forwarded to.
 	Backend IngressBackend `json:"backend" protobuf:"bytes,2,opt,name=backend"`
@@ -185,8 +252,69 @@ type HTTPIngressPath struct {
 // IngressBackend describes all endpoints for a given service and port.
 type IngressBackend struct {
 	// Specifies the name of the referenced service.
-	ServiceName string `json:"serviceName" protobuf:"bytes,1,opt,name=serviceName"`
+	// +optional
+	ServiceName string `json:"serviceName,omitempty" protobuf:"bytes,1,opt,name=serviceName"`
 
 	// Specifies the port of the referenced service.
-	ServicePort intstr.IntOrString `json:"servicePort" protobuf:"bytes,2,opt,name=servicePort"`
+	// +optional
+	ServicePort intstr.IntOrString `json:"servicePort,omitempty" protobuf:"bytes,2,opt,name=servicePort"`
+
+	// Resource is an ObjectRef to another Kubernetes resource in the namespace
+	// of the Ingress object. If resource is specified, serviceName and servicePort
+	// must not be specified.
+	// +optional
+	Resource *v1.TypedLocalObjectReference `json:"resource,omitempty" protobuf:"bytes,3,opt,name=resource"`
+}
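The Resource field comment above states a mutual-exclusivity rule: a backend is either a service (name and port) or a resource reference, never both. The sketch below is hypothetical and uses simplified stand-in fields rather than the real API types or the real API validation.

package main

import (
	"errors"
	"fmt"
)

// simplifiedBackend mirrors only the IngressBackend fields relevant to the
// rule; ResourceRef stands in for *v1.TypedLocalObjectReference.
type simplifiedBackend struct {
	ServiceName string
	ServicePort int
	ResourceRef string
}

// validateBackend is an illustrative check, not the actual API validation.
func validateBackend(b simplifiedBackend) error {
	hasService := b.ServiceName != "" || b.ServicePort != 0
	hasResource := b.ResourceRef != ""
	switch {
	case hasService && hasResource:
		return errors.New("resource is mutually exclusive with serviceName/servicePort")
	case !hasService && !hasResource:
		return errors.New("either a service backend or a resource backend must be set")
	}
	return nil
}

func main() {
	fmt.Println(validateBackend(simplifiedBackend{ServiceName: "web", ServicePort: 80})) // <nil>
	fmt.Println(validateBackend(simplifiedBackend{ServiceName: "web", ResourceRef: "my-bucket"}))
}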
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// IngressClass represents the class of the Ingress, referenced by the Ingress
+// Spec. The `ingressclass.kubernetes.io/is-default-class` annotation can be
+// used to indicate that an IngressClass should be considered default. When a
+// single IngressClass resource has this annotation set to true, new Ingress
+// resources without a class specified will be assigned this default class.
+type IngressClass struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Spec is the desired state of the IngressClass.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	// +optional
+	Spec IngressClassSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// IngressClassSpec provides information about the class of an Ingress.
+type IngressClassSpec struct {
+	// Controller refers to the name of the controller that should handle this
+	// class. This allows for different "flavors" that are controlled by the
+	// same controller. For example, you may have different Parameters for the
+	// same implementing controller. This should be specified as a
+	// domain-prefixed path no more than 250 characters in length, e.g.
+	// "acme.io/ingress-controller". This field is immutable.
+	Controller string `json:"controller,omitempty" protobuf:"bytes,1,opt,name=controller"`
+
+	// Parameters is a link to a custom resource containing additional
+	// configuration for the controller. This is optional if the controller does
+	// not require extra parameters.
+	// +optional
+	Parameters *v1.TypedLocalObjectReference `json:"parameters,omitempty" protobuf:"bytes,2,opt,name=parameters"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// IngressClassList is a collection of IngressClasses.
+type IngressClassList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata.
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is the list of IngressClasses.
+	// +listType=set
+	Items []IngressClass `json:"items" protobuf:"bytes,2,rep,name=items"`
 }
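The IngressClass comment above describes the is-default-class behaviour: when exactly one IngressClass carries the annotation set to true, new Ingresses without a class get that class. The sketch below is a simplified illustration, not controller code; only the annotation key matches the constant added later in this patch (well_known_annotations.go).

package main

import "fmt"

const annotationIsDefaultIngressClass = "ingressclass.kubernetes.io/is-default-class"

// simplifiedClass stands in for v1beta1.IngressClass in this sketch.
type simplifiedClass struct {
	Name        string
	Annotations map[string]string
}

// defaultIngressClass returns the class to assign to new Ingresses that do
// not set spec.ingressClassName; it requires exactly one default class.
func defaultIngressClass(classes []simplifiedClass) (string, bool) {
	var defaults []string
	for _, c := range classes {
		if c.Annotations[annotationIsDefaultIngressClass] == "true" {
			defaults = append(defaults, c.Name)
		}
	}
	if len(defaults) == 1 {
		return defaults[0], true
	}
	return "", false // zero or multiple defaults: no class is assigned
}

func main() {
	classes := []simplifiedClass{
		{Name: "nginx", Annotations: map[string]string{annotationIsDefaultIngressClass: "true"}},
		{Name: "haproxy"},
	}
	fmt.Println(defaultIngressClass(classes)) // nginx true
}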
diff --git a/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go
index 4ae5e32d..c774249d 100644
--- a/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go
@@ -28,9 +28,10 @@ package v1beta1
 
 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
 var map_HTTPIngressPath = map[string]string{
-	"":        "HTTPIngressPath associates a path regex with a backend. Incoming urls matching the path are forwarded to the backend.",
-	"path":    "Path is an extended POSIX regex as defined by IEEE Std 1003.1, (i.e this follows the egrep/unix syntax, not the perl syntax) matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/'. If unspecified, the path defaults to a catch all sending traffic to the backend.",
-	"backend": "Backend defines the referenced service endpoint to which the traffic will be forwarded to.",
+	"":         "HTTPIngressPath associates a path with a backend. Incoming urls matching the path are forwarded to the backend.",
+	"path":     "Path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/'. When unspecified, all paths from incoming requests are matched.",
+	"pathType": "PathType determines the interpretation of the Path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is\n  done on a path element by element basis. A path element refers to the\n  list of labels in the path split by the '/' separator. A request is a\n  match for path p if p is an element-wise prefix of the\n  request path. Note that if the last element of the path is a substring\n  of the last element in request path, it is not a match (e.g. /foo/bar\n  matches /foo/bar/baz, but does not match /foo/barbaz).\n* ImplementationSpecific: Interpretation of the Path matching is up to\n  the IngressClass. Implementations can treat this as a separate PathType\n  or treat it identically to Prefix or Exact path types.\nImplementations are required to support all path types. Defaults to ImplementationSpecific.",
+	"backend":  "Backend defines the referenced service endpoint to which the traffic will be forwarded to.",
 }
 
 func (HTTPIngressPath) SwaggerDoc() map[string]string {
@@ -61,12 +62,43 @@ var map_IngressBackend = map[string]string{
 	"":            "IngressBackend describes all endpoints for a given service and port.",
 	"serviceName": "Specifies the name of the referenced service.",
 	"servicePort": "Specifies the port of the referenced service.",
+	"resource":    "Resource is an ObjectRef to another Kubernetes resource in the namespace of the Ingress object. If resource is specified, serviceName and servicePort must not be specified.",
 }
 
 func (IngressBackend) SwaggerDoc() map[string]string {
 	return map_IngressBackend
 }
 
+var map_IngressClass = map[string]string{
+	"":         "IngressClass represents the class of the Ingress, referenced by the Ingress Spec. The `ingressclass.kubernetes.io/is-default-class` annotation can be used to indicate that an IngressClass should be considered default. When a single IngressClass resource has this annotation set to true, new Ingress resources without a class specified will be assigned this default class.",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"spec":     "Spec is the desired state of the IngressClass. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+}
+
+func (IngressClass) SwaggerDoc() map[string]string {
+	return map_IngressClass
+}
+
+var map_IngressClassList = map[string]string{
+	"":         "IngressClassList is a collection of IngressClasses.",
+	"metadata": "Standard list metadata.",
+	"items":    "Items is the list of IngressClasses.",
+}
+
+func (IngressClassList) SwaggerDoc() map[string]string {
+	return map_IngressClassList
+}
+
+var map_IngressClassSpec = map[string]string{
+	"":           "IngressClassSpec provides information about the class of an Ingress.",
+	"controller": "Controller refers to the name of the controller that should handle this class. This allows for different \"flavors\" that are controlled by the same controller. For example, you may have different Parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \"acme.io/ingress-controller\". This field is immutable.",
+	"parameters": "Parameters is a link to a custom resource containing additional configuration for the controller. This is optional if the controller does not require extra parameters.",
+}
+
+func (IngressClassSpec) SwaggerDoc() map[string]string {
+	return map_IngressClassSpec
+}
+
 var map_IngressList = map[string]string{
 	"":         "IngressList is a collection of Ingress.",
 	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
@@ -79,7 +111,7 @@ func (IngressList) SwaggerDoc() map[string]string {
 
 var map_IngressRule = map[string]string{
 	"":     "IngressRule represents the rules mapping the paths under a specified host to the related backend services. Incoming requests are first evaluated for a host match, then routed to the backend associated with the matching IngressRuleValue.",
-	"host": "Host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in the RFC: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the\n\t  IP in the Spec of the parent Ingress.\n2. The `:` delimiter is not respected because ports are not allowed.\n\t  Currently the port of an Ingress is implicitly :80 for http and\n\t  :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.",
+	"host": "Host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to\n   the IP in the Spec of the parent Ingress.\n2. The `:` delimiter is not respected because ports are not allowed.\n\t  Currently the port of an Ingress is implicitly :80 for http and\n\t  :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.\n\nHost can be \"precise\" which is a domain name without the terminating dot of a network host (e.g. \"foo.bar.com\") or \"wildcard\", which is a domain name prefixed with a single wildcard label (e.g. \"*.foo.com\"). The wildcard character '*' must appear by itself as the first DNS label and matches only a single label. You cannot have a wildcard label by itself (e.g. Host == \"*\"). Requests will be matched against the Host field in the following way: 1. If Host is precise, the request matches this rule if the http host header is equal to Host. 2. If Host is a wildcard, then the request matches this rule if the http host header is equal to the suffix (removing the first label) of the wildcard rule.",
 }
 
 func (IngressRule) SwaggerDoc() map[string]string {
@@ -95,10 +127,11 @@ func (IngressRuleValue) SwaggerDoc() map[string]string {
 }
 
 var map_IngressSpec = map[string]string{
-	"":        "IngressSpec describes the Ingress the user wishes to exist.",
-	"backend": "A default backend capable of servicing requests that don't match any rule. At least one of 'backend' or 'rules' must be specified. This field is optional to allow the loadbalancer controller or defaulting logic to specify a global default.",
-	"tls":     "TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.",
-	"rules":   "A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.",
+	"":                 "IngressSpec describes the Ingress the user wishes to exist.",
+	"ingressClassName": "IngressClassName is the name of the IngressClass cluster resource. The associated IngressClass defines which controller will implement the resource. This replaces the deprecated `kubernetes.io/ingress.class` annotation. For backwards compatibility, when that annotation is set, it must be given precedence over this field. The controller may emit a warning if the field and annotation have different values. Implementations of this API should ignore Ingresses without a class specified. An IngressClass resource may be marked as default, which can be used to set a default value for this field. For more information, refer to the IngressClass documentation.",
+	"backend":          "A default backend capable of servicing requests that don't match any rule. At least one of 'backend' or 'rules' must be specified. This field is optional to allow the loadbalancer controller or defaulting logic to specify a global default.",
+	"tls":              "TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.",
+	"rules":            "A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.",
 }
 
 func (IngressSpec) SwaggerDoc() map[string]string {
@@ -117,7 +150,7 @@ func (IngressStatus) SwaggerDoc() map[string]string {
 var map_IngressTLS = map[string]string{
 	"":           "IngressTLS describes the transport layer security associated with an Ingress.",
 	"hosts":      "Hosts are a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified.",
-	"secretName": "SecretName is the name of the secret used to terminate SSL traffic on 443. Field is left optional to allow SSL routing based on SNI hostname alone. If the SNI host in a listener conflicts with the \"Host\" header field used by an IngressRule, the SNI host is used for termination and value of the Host header is used for routing.",
+	"secretName": "SecretName is the name of the secret used to terminate TLS traffic on port 443. Field is left optional to allow TLS routing based on SNI hostname alone. If the SNI host in a listener conflicts with the \"Host\" header field used by an IngressRule, the SNI host is used for termination and the value of the Host header is used for routing.",
 }
 
 func (IngressTLS) SwaggerDoc() map[string]string {
diff --git a/vendor/k8s.io/api/networking/v1beta1/well_known_annotations.go b/vendor/k8s.io/api/networking/v1beta1/well_known_annotations.go
new file mode 100644
index 00000000..1629b5d5
--- /dev/null
+++ b/vendor/k8s.io/api/networking/v1beta1/well_known_annotations.go
@@ -0,0 +1,32 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+const (
+	// AnnotationIsDefaultIngressClass can be used to indicate that an
+	// IngressClass should be considered default. When a single IngressClass
+	// resource has this annotation set to true, new Ingress resources without a
+	// class specified will be assigned this default class.
+	AnnotationIsDefaultIngressClass = "ingressclass.kubernetes.io/is-default-class"
+
+	// AnnotationIngressClass indicates the class of an Ingress to be used when
+	// determining which controller should implement the Ingress. Use of this
+	// annotation is deprecated. The Ingress class field should be used instead
+	// of this annotation.
+	// +deprecated
+	AnnotationIngressClass = "kubernetes.io/ingress.class"
+)
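The IngressSpec comment in this patch requires that the deprecated kubernetes.io/ingress.class annotation, when present, take precedence over spec.ingressClassName. A minimal sketch of that precedence is shown below; the helper is hypothetical and not part of this patch.

package main

import "fmt"

// effectiveIngressClass resolves the class name for an Ingress: the
// deprecated annotation wins over spec.ingressClassName, and an Ingress with
// neither should be ignored by implementations.
func effectiveIngressClass(annotations map[string]string, ingressClassName *string) (string, bool) {
	if v, ok := annotations["kubernetes.io/ingress.class"]; ok {
		return v, true
	}
	if ingressClassName != nil {
		return *ingressClassName, true
	}
	return "", false // no class specified
}

func main() {
	name := "nginx"
	fmt.Println(effectiveIngressClass(map[string]string{"kubernetes.io/ingress.class": "legacy"}, &name)) // legacy true
	fmt.Println(effectiveIngressClass(nil, &name))                                                        // nginx true
}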
diff --git a/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go
index 16ac936a..d55ccde6 100644
--- a/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go
@@ -21,13 +21,19 @@ limitations under the License.
 package v1beta1
 
 import (
+	v1 "k8s.io/api/core/v1"
 	runtime "k8s.io/apimachinery/pkg/runtime"
 )
 
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *HTTPIngressPath) DeepCopyInto(out *HTTPIngressPath) {
 	*out = *in
-	out.Backend = in.Backend
+	if in.PathType != nil {
+		in, out := &in.PathType, &out.PathType
+		*out = new(PathType)
+		**out = **in
+	}
+	in.Backend.DeepCopyInto(&out.Backend)
 	return
 }
 
@@ -47,7 +53,9 @@ func (in *HTTPIngressRuleValue) DeepCopyInto(out *HTTPIngressRuleValue) {
 	if in.Paths != nil {
 		in, out := &in.Paths, &out.Paths
 		*out = make([]HTTPIngressPath, len(*in))
-		copy(*out, *in)
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
 	}
 	return
 }
@@ -94,6 +102,11 @@ func (in *Ingress) DeepCopyObject() runtime.Object {
 func (in *IngressBackend) DeepCopyInto(out *IngressBackend) {
 	*out = *in
 	out.ServicePort = in.ServicePort
+	if in.Resource != nil {
+		in, out := &in.Resource, &out.Resource
+		*out = new(v1.TypedLocalObjectReference)
+		(*in).DeepCopyInto(*out)
+	}
 	return
 }
 
@@ -107,6 +120,87 @@ func (in *IngressBackend) DeepCopy() *IngressBackend {
 	return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressClass) DeepCopyInto(out *IngressClass) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressClass.
+func (in *IngressClass) DeepCopy() *IngressClass {
+	if in == nil {
+		return nil
+	}
+	out := new(IngressClass)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *IngressClass) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressClassList) DeepCopyInto(out *IngressClassList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]IngressClass, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressClassList.
+func (in *IngressClassList) DeepCopy() *IngressClassList {
+	if in == nil {
+		return nil
+	}
+	out := new(IngressClassList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *IngressClassList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IngressClassSpec) DeepCopyInto(out *IngressClassSpec) {
+	*out = *in
+	if in.Parameters != nil {
+		in, out := &in.Parameters, &out.Parameters
+		*out = new(v1.TypedLocalObjectReference)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressClassSpec.
+func (in *IngressClassSpec) DeepCopy() *IngressClassSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(IngressClassSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *IngressList) DeepCopyInto(out *IngressList) {
 	*out = *in
@@ -181,10 +275,15 @@ func (in *IngressRuleValue) DeepCopy() *IngressRuleValue {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *IngressSpec) DeepCopyInto(out *IngressSpec) {
 	*out = *in
+	if in.IngressClassName != nil {
+		in, out := &in.IngressClassName, &out.IngressClassName
+		*out = new(string)
+		**out = **in
+	}
 	if in.Backend != nil {
 		in, out := &in.Backend, &out.Backend
 		*out = new(IngressBackend)
-		**out = **in
+		(*in).DeepCopyInto(*out)
 	}
 	if in.TLS != nil {
 		in, out := &in.TLS, &out.TLS
diff --git a/vendor/k8s.io/api/node/v1alpha1/generated.pb.go b/vendor/k8s.io/api/node/v1alpha1/generated.pb.go
index 34f4dd6d..e6658a96 100644
--- a/vendor/k8s.io/api/node/v1alpha1/generated.pb.go
+++ b/vendor/k8s.io/api/node/v1alpha1/generated.pb.go
@@ -46,7 +46,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 func (m *Overhead) Reset()      { *m = Overhead{} }
 func (*Overhead) ProtoMessage() {}
@@ -1500,6 +1500,7 @@ func (m *Scheduling) Unmarshal(dAtA []byte) error {
 func skipGenerated(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
+	depth := 0
 	for iNdEx < l {
 		var wire uint64
 		for shift := uint(0); ; shift += 7 {
@@ -1531,10 +1532,8 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			return iNdEx, nil
 		case 1:
 			iNdEx += 8
-			return iNdEx, nil
 		case 2:
 			var length int
 			for shift := uint(0); ; shift += 7 {
@@ -1555,55 +1554,30 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 				return 0, ErrInvalidLengthGenerated
 			}
 			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthGenerated
-			}
-			return iNdEx, nil
 		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowGenerated
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipGenerated(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthGenerated
-				}
-			}
-			return iNdEx, nil
+			depth++
 		case 4:
-			return iNdEx, nil
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
 		case 5:
 			iNdEx += 4
-			return iNdEx, nil
 		default:
 			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
 		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
 	}
-	panic("unreachable")
+	return 0, io.ErrUnexpectedEOF
 }
 
 var (
-	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
 )
diff --git a/vendor/k8s.io/api/node/v1beta1/generated.pb.go b/vendor/k8s.io/api/node/v1beta1/generated.pb.go
index 63992f43..b85cbd29 100644
--- a/vendor/k8s.io/api/node/v1beta1/generated.pb.go
+++ b/vendor/k8s.io/api/node/v1beta1/generated.pb.go
@@ -46,7 +46,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 func (m *Overhead) Reset()      { *m = Overhead{} }
 func (*Overhead) ProtoMessage() {}
@@ -1329,6 +1329,7 @@ func (m *Scheduling) Unmarshal(dAtA []byte) error {
 func skipGenerated(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
+	depth := 0
 	for iNdEx < l {
 		var wire uint64
 		for shift := uint(0); ; shift += 7 {
@@ -1360,10 +1361,8 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			return iNdEx, nil
 		case 1:
 			iNdEx += 8
-			return iNdEx, nil
 		case 2:
 			var length int
 			for shift := uint(0); ; shift += 7 {
@@ -1384,55 +1383,30 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 				return 0, ErrInvalidLengthGenerated
 			}
 			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthGenerated
-			}
-			return iNdEx, nil
 		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowGenerated
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipGenerated(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthGenerated
-				}
-			}
-			return iNdEx, nil
+			depth++
 		case 4:
-			return iNdEx, nil
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
 		case 5:
 			iNdEx += 4
-			return iNdEx, nil
 		default:
 			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
 		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
 	}
-	panic("unreachable")
+	return 0, io.ErrUnexpectedEOF
 }
 
 var (
-	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
 )
diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.pb.go b/vendor/k8s.io/api/policy/v1beta1/generated.pb.go
index 5b57f699..40ec7ef7 100644
--- a/vendor/k8s.io/api/policy/v1beta1/generated.pb.go
+++ b/vendor/k8s.io/api/policy/v1beta1/generated.pb.go
@@ -47,7 +47,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 func (m *AllowedCSIDriver) Reset()      { *m = AllowedCSIDriver{} }
 func (*AllowedCSIDriver) ProtoMessage() {}
@@ -609,125 +609,125 @@ func init() {
 }
 
 var fileDescriptor_014060e454a820dc = []byte{
-	// 1883 bytes of a gzipped FileDescriptorProto
+	// 1878 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xdd, 0x6e, 0x1b, 0xc7,
-	0x15, 0xd6, 0x9a, 0xfa, 0xa1, 0x46, 0x3f, 0x16, 0x47, 0x3f, 0x5e, 0x2b, 0x35, 0xd7, 0xd9, 0x00,
+	0x15, 0xd6, 0x9a, 0xfa, 0xa1, 0x46, 0x3f, 0x16, 0x47, 0x3f, 0x5e, 0x2b, 0x0d, 0xd7, 0xd9, 0x00,
 	0x85, 0x9b, 0x26, 0xcb, 0x58, 0x76, 0x5c, 0xa3, 0x69, 0x8b, 0x68, 0x45, 0xc9, 0x56, 0x60, 0x59,
 	0xec, 0xd0, 0x0e, 0xda, 0xc2, 0x2d, 0x3a, 0xe4, 0x8e, 0xa8, 0x8d, 0x96, 0xbb, 0xdb, 0x99, 0x59,
 	0x46, 0xbc, 0xeb, 0x45, 0x2f, 0x7a, 0xd9, 0x17, 0x08, 0xfa, 0x00, 0x45, 0xaf, 0xfa, 0x12, 0x0e,
-	0x50, 0x04, 0xb9, 0x0c, 0x7a, 0x41, 0xd4, 0x2c, 0xfa, 0x12, 0xbe, 0x0a, 0x76, 0x38, 0xbb, 0xe4,
-	0xfe, 0x91, 0x76, 0x00, 0xfb, 0x8e, 0x3b, 0xe7, 0xfb, 0xbe, 0x33, 0x73, 0xe6, 0xcc, 0x99, 0xc3,
-	0x01, 0xe6, 0xc5, 0x7d, 0x66, 0xd8, 0x5e, 0xed, 0x22, 0x68, 0x11, 0xea, 0x12, 0x4e, 0x58, 0xad,
-	0x47, 0x5c, 0xcb, 0xa3, 0x35, 0x69, 0xc0, 0xbe, 0x5d, 0xf3, 0x3d, 0xc7, 0x6e, 0xf7, 0x6b, 0xbd,
-	0xdb, 0x2d, 0xc2, 0xf1, 0xed, 0x5a, 0x87, 0xb8, 0x84, 0x62, 0x4e, 0x2c, 0xc3, 0xa7, 0x1e, 0xf7,
-	0xe0, 0xf5, 0x11, 0xd4, 0xc0, 0xbe, 0x6d, 0x8c, 0xa0, 0x86, 0x84, 0xee, 0x7e, 0xd8, 0xb1, 0xf9,
-	0x79, 0xd0, 0x32, 0xda, 0x5e, 0xb7, 0xd6, 0xf1, 0x3a, 0x5e, 0x4d, 0x30, 0x5a, 0xc1, 0x99, 0xf8,
-	0x12, 0x1f, 0xe2, 0xd7, 0x48, 0x69, 0x57, 0x9f, 0x70, 0xda, 0xf6, 0x28, 0xa9, 0xf5, 0x32, 0xde,
-	0x76, 0xef, 0x8e, 0x31, 0x5d, 0xdc, 0x3e, 0xb7, 0x5d, 0x42, 0xfb, 0x35, 0xff, 0xa2, 0x13, 0x0e,
-	0xb0, 0x5a, 0x97, 0x70, 0x9c, 0xc7, 0xaa, 0x15, 0xb1, 0x68, 0xe0, 0x72, 0xbb, 0x4b, 0x32, 0x84,
-	0x7b, 0xb3, 0x08, 0xac, 0x7d, 0x4e, 0xba, 0x38, 0xc3, 0xbb, 0x53, 0xc4, 0x0b, 0xb8, 0xed, 0xd4,
-	0x6c, 0x97, 0x33, 0x4e, 0xd3, 0x24, 0xfd, 0x2e, 0xd8, 0xd8, 0x77, 0x1c, 0xef, 0x4b, 0x62, 0x1d,
-	0x34, 0x8f, 0xeb, 0xd4, 0xee, 0x11, 0x0a, 0x6f, 0x82, 0x79, 0x17, 0x77, 0x89, 0xaa, 0xdc, 0x54,
-	0x6e, 0x2d, 0x9b, 0xab, 0xcf, 0x07, 0xda, 0xdc, 0x70, 0xa0, 0xcd, 0x3f, 0xc6, 0x5d, 0x82, 0x84,
-	0x45, 0xff, 0x04, 0x54, 0x24, 0xeb, 0xc8, 0x21, 0x97, 0x9f, 0x7b, 0x4e, 0xd0, 0x25, 0xf0, 0xc7,
-	0x60, 0xd1, 0x12, 0x02, 0x92, 0xb8, 0x2e, 0x89, 0x8b, 0x23, 0x59, 0x24, 0xad, 0x3a, 0x03, 0x57,
-	0x25, 0xf9, 0xa1, 0xc7, 0x78, 0x03, 0xf3, 0x73, 0xb8, 0x07, 0x80, 0x8f, 0xf9, 0x79, 0x83, 0x92,
-	0x33, 0xfb, 0x52, 0xd2, 0xa1, 0xa4, 0x83, 0x46, 0x6c, 0x41, 0x13, 0x28, 0xf8, 0x01, 0x28, 0x53,
-	0x82, 0xad, 0x53, 0xd7, 0xe9, 0xab, 0x57, 0x6e, 0x2a, 0xb7, 0xca, 0xe6, 0x86, 0x64, 0x94, 0x91,
-	0x1c, 0x47, 0x31, 0x42, 0xff, 0x8f, 0x02, 0xca, 0x87, 0x3d, 0xbb, 0xcd, 0x6d, 0xcf, 0x85, 0x7f,
-	0x04, 0xe5, 0x70, 0xb7, 0x2c, 0xcc, 0xb1, 0x70, 0xb6, 0xb2, 0xf7, 0x91, 0x31, 0xce, 0xa4, 0x38,
-	0x78, 0x86, 0x7f, 0xd1, 0x09, 0x07, 0x98, 0x11, 0xa2, 0x8d, 0xde, 0x6d, 0xe3, 0xb4, 0xf5, 0x05,
-	0x69, 0xf3, 0x13, 0xc2, 0xf1, 0x78, 0x7a, 0xe3, 0x31, 0x14, 0xab, 0x42, 0x07, 0xac, 0x59, 0xc4,
-	0x21, 0x9c, 0x9c, 0xfa, 0xa1, 0x47, 0x26, 0x66, 0xb8, 0xb2, 0x77, 0xe7, 0xd5, 0xdc, 0xd4, 0x27,
-	0xa9, 0x66, 0x65, 0x38, 0xd0, 0xd6, 0x12, 0x43, 0x28, 0x29, 0xae, 0x7f, 0xa5, 0x80, 0x9d, 0xa3,
-	0xe6, 0x03, 0xea, 0x05, 0x7e, 0x93, 0x87, 0xbb, 0xdb, 0xe9, 0x4b, 0x13, 0xfc, 0x19, 0x98, 0xa7,
-	0x81, 0x13, 0xed, 0xe5, 0x7b, 0xd1, 0x5e, 0xa2, 0xc0, 0x21, 0x2f, 0x07, 0xda, 0x66, 0x8a, 0xf5,
-	0xa4, 0xef, 0x13, 0x24, 0x08, 0xf0, 0x33, 0xb0, 0x48, 0xb1, 0xdb, 0x21, 0xe1, 0xd4, 0x4b, 0xb7,
-	0x56, 0xf6, 0x74, 0xa3, 0xf0, 0xac, 0x19, 0xc7, 0x75, 0x14, 0x42, 0xc7, 0x3b, 0x2e, 0x3e, 0x19,
-	0x92, 0x0a, 0xfa, 0x09, 0x58, 0x13, 0x5b, 0xed, 0x51, 0x2e, 0x2c, 0xf0, 0x06, 0x28, 0x75, 0x6d,
-	0x57, 0x4c, 0x6a, 0xc1, 0x5c, 0x91, 0xac, 0xd2, 0x89, 0xed, 0xa2, 0x70, 0x5c, 0x98, 0xf1, 0xa5,
-	0x88, 0xd9, 0xa4, 0x19, 0x5f, 0xa2, 0x70, 0x5c, 0x7f, 0x00, 0x96, 0xa4, 0xc7, 0x49, 0xa1, 0xd2,
-	0x74, 0xa1, 0x52, 0x8e, 0xd0, 0x3f, 0xae, 0x80, 0xcd, 0x86, 0x67, 0xd5, 0x6d, 0x46, 0x03, 0x11,
-	0x2f, 0x33, 0xb0, 0x3a, 0x84, 0xbf, 0x85, 0xfc, 0x78, 0x02, 0xe6, 0x99, 0x4f, 0xda, 0x32, 0x2d,
-	0xf6, 0xa6, 0xc4, 0x36, 0x67, 0x7e, 0x4d, 0x9f, 0xb4, 0xc7, 0xc7, 0x32, 0xfc, 0x42, 0x42, 0x0d,
-	0x3e, 0x03, 0x8b, 0x8c, 0x63, 0x1e, 0x30, 0xb5, 0x24, 0x74, 0xef, 0xbe, 0xa6, 0xae, 0xe0, 0x8e,
-	0x77, 0x71, 0xf4, 0x8d, 0xa4, 0xa6, 0xfe, 0x6f, 0x05, 0x5c, 0xcb, 0x61, 0x3d, 0xb2, 0x19, 0x87,
-	0xcf, 0x32, 0x11, 0x33, 0x5e, 0x2d, 0x62, 0x21, 0x5b, 0xc4, 0x2b, 0x3e, 0xbc, 0xd1, 0xc8, 0x44,
-	0xb4, 0x9a, 0x60, 0xc1, 0xe6, 0xa4, 0x1b, 0xa5, 0xa2, 0xf1, 0x7a, 0xcb, 0x32, 0xd7, 0xa4, 0xf4,
-	0xc2, 0x71, 0x28, 0x82, 0x46, 0x5a, 0xfa, 0x37, 0x57, 0x72, 0x97, 0x13, 0x86, 0x13, 0x9e, 0x81,
-	0xd5, 0xae, 0xed, 0xee, 0xf7, 0xb0, 0xed, 0xe0, 0x96, 0x3c, 0x3d, 0xd3, 0x92, 0x20, 0xac, 0xb0,
-	0xc6, 0xa8, 0xc2, 0x1a, 0xc7, 0x2e, 0x3f, 0xa5, 0x4d, 0x4e, 0x6d, 0xb7, 0x63, 0x6e, 0x0c, 0x07,
-	0xda, 0xea, 0xc9, 0x84, 0x12, 0x4a, 0xe8, 0xc2, 0xdf, 0x83, 0x32, 0x23, 0x0e, 0x69, 0x73, 0x8f,
-	0xbe, 0x5e, 0x85, 0x78, 0x84, 0x5b, 0xc4, 0x69, 0x4a, 0xaa, 0xb9, 0x1a, 0xc6, 0x2d, 0xfa, 0x42,
-	0xb1, 0x24, 0x74, 0xc0, 0x7a, 0x17, 0x5f, 0x3e, 0x75, 0x71, 0xbc, 0x90, 0xd2, 0x0f, 0x5c, 0x08,
-	0x1c, 0x0e, 0xb4, 0xf5, 0x93, 0x84, 0x16, 0x4a, 0x69, 0xeb, 0xff, 0x9f, 0x07, 0xd7, 0x0b, 0xb3,
-	0x0a, 0x7e, 0x06, 0xa0, 0xd7, 0x62, 0x84, 0xf6, 0x88, 0xf5, 0x60, 0x74, 0x07, 0xd9, 0x5e, 0x74,
-	0x70, 0x77, 0xe5, 0x06, 0xc1, 0xd3, 0x0c, 0x02, 0xe5, 0xb0, 0xe0, 0x5f, 0x14, 0xb0, 0x66, 0x8d,
-	0xdc, 0x10, 0xab, 0xe1, 0x59, 0x51, 0x62, 0x3c, 0xf8, 0x21, 0xf9, 0x6e, 0xd4, 0x27, 0x95, 0x0e,
-	0x5d, 0x4e, 0xfb, 0xe6, 0xb6, 0x9c, 0xd0, 0x5a, 0xc2, 0x86, 0x92, 0x4e, 0xe1, 0x09, 0x80, 0x56,
-	0x2c, 0xc9, 0xe4, 0x9d, 0x26, 0x42, 0xbc, 0x60, 0xde, 0x90, 0x0a, 0xdb, 0x09, 0xbf, 0x11, 0x08,
-	0xe5, 0x10, 0xe1, 0xaf, 0xc0, 0x7a, 0x3b, 0xa0, 0x94, 0xb8, 0xfc, 0x21, 0xc1, 0x0e, 0x3f, 0xef,
-	0xab, 0xf3, 0x42, 0x6a, 0x47, 0x4a, 0xad, 0x1f, 0x24, 0xac, 0x28, 0x85, 0x0e, 0xf9, 0x16, 0x61,
-	0x36, 0x25, 0x56, 0xc4, 0x5f, 0x48, 0xf2, 0xeb, 0x09, 0x2b, 0x4a, 0xa1, 0xe1, 0x7d, 0xb0, 0x4a,
-	0x2e, 0x7d, 0xd2, 0x8e, 0x62, 0xba, 0x28, 0xd8, 0x5b, 0x92, 0xbd, 0x7a, 0x38, 0x61, 0x43, 0x09,
-	0xe4, 0xae, 0x03, 0x60, 0x36, 0x88, 0x70, 0x03, 0x94, 0x2e, 0x48, 0x7f, 0x74, 0xf3, 0xa0, 0xf0,
-	0x27, 0xfc, 0x14, 0x2c, 0xf4, 0xb0, 0x13, 0x10, 0x99, 0xeb, 0xef, 0xbf, 0x5a, 0xae, 0x3f, 0xb1,
-	0xbb, 0x04, 0x8d, 0x88, 0x3f, 0xbf, 0x72, 0x5f, 0xd1, 0xbf, 0x56, 0x40, 0xa5, 0xe1, 0x59, 0x4d,
-	0xd2, 0x0e, 0xa8, 0xcd, 0xfb, 0x0d, 0xb1, 0xcf, 0x6f, 0xa1, 0x66, 0xa3, 0x44, 0xcd, 0xfe, 0x68,
-	0x7a, 0xae, 0x25, 0x67, 0x57, 0x54, 0xb1, 0xf5, 0xe7, 0x0a, 0xd8, 0xce, 0xa0, 0xdf, 0x42, 0x45,
-	0xfd, 0x75, 0xb2, 0xa2, 0x7e, 0xf0, 0x3a, 0x8b, 0x29, 0xa8, 0xa7, 0x5f, 0x57, 0x72, 0x96, 0x22,
-	0xaa, 0x69, 0xd8, 0xdd, 0x51, 0xbb, 0x67, 0x3b, 0xa4, 0x43, 0x2c, 0xb1, 0x98, 0xf2, 0x44, 0x77,
-	0x17, 0x5b, 0xd0, 0x04, 0x0a, 0x32, 0xb0, 0x63, 0x91, 0x33, 0x1c, 0x38, 0x7c, 0xdf, 0xb2, 0x0e,
-	0xb0, 0x8f, 0x5b, 0xb6, 0x63, 0x73, 0x5b, 0xb6, 0x23, 0xcb, 0xe6, 0x27, 0xc3, 0x81, 0xb6, 0x53,
-	0xcf, 0x45, 0xbc, 0x1c, 0x68, 0x37, 0xb2, 0xdd, 0xbc, 0x11, 0x43, 0xfa, 0xa8, 0x40, 0x1a, 0xf6,
-	0x81, 0x4a, 0xc9, 0x9f, 0x82, 0xf0, 0x50, 0xd4, 0xa9, 0xe7, 0x27, 0xdc, 0x96, 0x84, 0xdb, 0x5f,
-	0x0e, 0x07, 0x9a, 0x8a, 0x0a, 0x30, 0xb3, 0x1d, 0x17, 0xca, 0xc3, 0x2f, 0xc0, 0x26, 0x96, 0x7d,
-	0xf8, 0xa4, 0xd7, 0x79, 0xe1, 0xf5, 0xfe, 0x70, 0xa0, 0x6d, 0xee, 0x67, 0xcd, 0xb3, 0x1d, 0xe6,
-	0x89, 0xc2, 0x1a, 0x58, 0xea, 0x89, 0x96, 0x9d, 0xa9, 0x0b, 0x42, 0x7f, 0x7b, 0x38, 0xd0, 0x96,
-	0x46, 0x5d, 0x7c, 0xa8, 0xb9, 0x78, 0xd4, 0x14, 0x8d, 0x60, 0x84, 0x82, 0x1f, 0x83, 0x95, 0x73,
-	0x8f, 0xf1, 0xc7, 0x84, 0x7f, 0xe9, 0xd1, 0x0b, 0x51, 0x18, 0xca, 0xe6, 0xa6, 0xdc, 0xc1, 0x95,
-	0x87, 0x63, 0x13, 0x9a, 0xc4, 0xc1, 0xdf, 0x82, 0xe5, 0x73, 0xd9, 0xf6, 0x31, 0x75, 0x49, 0x24,
-	0xda, 0xad, 0x29, 0x89, 0x96, 0x68, 0x11, 0xcd, 0x8a, 0x94, 0x5f, 0x8e, 0x86, 0x19, 0x1a, 0xab,
-	0xc1, 0x9f, 0x80, 0x25, 0xf1, 0x71, 0x5c, 0x57, 0xcb, 0x62, 0x36, 0x57, 0x25, 0x7c, 0xe9, 0xe1,
-	0x68, 0x18, 0x45, 0xf6, 0x08, 0x7a, 0xdc, 0x38, 0x50, 0x97, 0xb3, 0xd0, 0xe3, 0xc6, 0x01, 0x8a,
-	0xec, 0xf0, 0x19, 0x58, 0x62, 0xe4, 0x91, 0xed, 0x06, 0x97, 0x2a, 0x10, 0x47, 0xee, 0xf6, 0x94,
-	0xe9, 0x36, 0x0f, 0x05, 0x32, 0xd5, 0x70, 0x8f, 0xd5, 0xa5, 0x1d, 0x45, 0x92, 0xd0, 0x02, 0xcb,
-	0x34, 0x70, 0xf7, 0xd9, 0x53, 0x46, 0xa8, 0xba, 0x92, 0xb9, 0xed, 0xd3, 0xfa, 0x28, 0xc2, 0xa6,
-	0x3d, 0xc4, 0x91, 0x89, 0x11, 0x68, 0x2c, 0x0c, 0x2d, 0x00, 0xc4, 0x87, 0xe8, 0xeb, 0xd5, 0x9d,
-	0x99, 0x7d, 0x20, 0x8a, 0xc1, 0x69, 0x3f, 0xeb, 0xe1, 0xf1, 0x1c, 0x9b, 0xd1, 0x84, 0x2e, 0xfc,
-	0xab, 0x02, 0x20, 0x0b, 0x7c, 0xdf, 0x21, 0x5d, 0xe2, 0x72, 0xec, 0x88, 0x51, 0xa6, 0xae, 0x0a,
-	0x77, 0xbf, 0x98, 0x16, 0xb5, 0x0c, 0x29, 0xed, 0x36, 0x6e, 0x06, 0xb2, 0x50, 0x94, 0xe3, 0x33,
-	0xdc, 0xb4, 0x33, 0xb9, 0xda, 0xb5, 0x99, 0x9b, 0x96, 0xff, 0x2f, 0x69, 0xbc, 0x69, 0xd2, 0x8e,
-	0x22, 0x49, 0xf8, 0x39, 0xd8, 0x89, 0xfe, 0x43, 0x22, 0xcf, 0xe3, 0x47, 0xb6, 0x43, 0x58, 0x9f,
-	0x71, 0xd2, 0x55, 0xd7, 0x45, 0x32, 0x55, 0x25, 0x73, 0x07, 0xe5, 0xa2, 0x50, 0x01, 0x1b, 0x76,
-	0x81, 0x16, 0x15, 0xa1, 0xf0, 0x84, 0xc6, 0x55, 0xf0, 0x90, 0xb5, 0xb1, 0x33, 0xea, 0x8d, 0xae,
-	0x0a, 0x07, 0xef, 0x0d, 0x07, 0x9a, 0x56, 0x9f, 0x0e, 0x45, 0xb3, 0xb4, 0xe0, 0x6f, 0x80, 0x8a,
-	0x8b, 0xfc, 0x6c, 0x08, 0x3f, 0x3f, 0x0a, 0x2b, 0x5b, 0xa1, 0x83, 0x42, 0x36, 0xf4, 0xc1, 0x06,
-	0x4e, 0xfe, 0x9b, 0x67, 0x6a, 0x45, 0x9c, 0xf5, 0xf7, 0xa7, 0xec, 0x43, 0xea, 0x01, 0xc0, 0x54,
-	0x65, 0x18, 0x37, 0x52, 0x06, 0x86, 0x32, 0xea, 0xf0, 0x12, 0x40, 0x9c, 0x7e, 0x7c, 0x60, 0x2a,
-	0x9c, 0x79, 0x91, 0x65, 0x5e, 0x2c, 0xc6, 0xa9, 0x96, 0x31, 0x31, 0x94, 0xe3, 0x03, 0x72, 0x50,
-	0xc1, 0xa9, 0xc7, 0x12, 0xa6, 0x5e, 0x13, 0x8e, 0x7f, 0x3a, 0xdb, 0x71, 0xcc, 0x31, 0xaf, 0x4b,
-	0xbf, 0x95, 0xb4, 0x85, 0xa1, 0xac, 0x03, 0xf8, 0x08, 0x6c, 0xc9, 0xc1, 0xa7, 0x2e, 0xc3, 0x67,
-	0xa4, 0xd9, 0x67, 0x6d, 0xee, 0x30, 0x75, 0x53, 0xd4, 0x6e, 0x75, 0x38, 0xd0, 0xb6, 0xf6, 0x73,
-	0xec, 0x28, 0x97, 0x05, 0x3f, 0x05, 0x1b, 0x67, 0x1e, 0x6d, 0xd9, 0x96, 0x45, 0xdc, 0x48, 0x69,
-	0x4b, 0x28, 0x6d, 0x85, 0xf1, 0x3f, 0x4a, 0xd9, 0x50, 0x06, 0x0d, 0x19, 0xd8, 0x96, 0xca, 0x0d,
-	0xea, 0xb5, 0x4f, 0xbc, 0xc0, 0xe5, 0xe1, 0x75, 0xc1, 0xd4, 0xed, 0xf8, 0x8a, 0xdc, 0xde, 0xcf,
-	0x03, 0xbc, 0x1c, 0x68, 0x37, 0x73, 0xae, 0xab, 0x04, 0x08, 0xe5, 0x6b, 0x43, 0x07, 0xac, 0xca,
-	0xe7, 0xaf, 0x03, 0x07, 0x33, 0xa6, 0xaa, 0xe2, 0xa8, 0xdf, 0x9b, 0x5e, 0xd8, 0x62, 0x78, 0xfa,
-	0xbc, 0x8b, 0xff, 0x65, 0x93, 0x00, 0x94, 0x50, 0xd7, 0xff, 0xae, 0x80, 0xeb, 0x85, 0x85, 0x11,
-	0xde, 0x4b, 0xbc, 0xa9, 0xe8, 0xa9, 0x37, 0x15, 0x98, 0x25, 0xbe, 0x81, 0x27, 0x95, 0xaf, 0x14,
-	0xa0, 0x16, 0xdd, 0x10, 0xf0, 0xe3, 0xc4, 0x04, 0xdf, 0x4d, 0x4d, 0xb0, 0x92, 0xe1, 0xbd, 0x81,
-	0xf9, 0x7d, 0xa3, 0x80, 0x77, 0xa6, 0xec, 0x40, 0x5c, 0x90, 0x88, 0x35, 0x89, 0x7a, 0x8c, 0xc3,
-	0xa3, 0xac, 0x88, 0x3c, 0x1a, 0x17, 0xa4, 0x1c, 0x0c, 0x2a, 0x64, 0xc3, 0xa7, 0xe0, 0x9a, 0xac,
-	0x86, 0x69, 0x9b, 0xe8, 0xdc, 0x97, 0xcd, 0x77, 0x86, 0x03, 0xed, 0x5a, 0x3d, 0x1f, 0x82, 0x8a,
-	0xb8, 0xfa, 0x3f, 0x15, 0xb0, 0x93, 0x7f, 0xe5, 0xc3, 0x3b, 0x89, 0x70, 0x6b, 0xa9, 0x70, 0x5f,
-	0x4d, 0xb1, 0x64, 0xb0, 0xff, 0x00, 0xd6, 0x65, 0x63, 0x90, 0x7c, 0x22, 0x4c, 0x04, 0x3d, 0x3c,
-	0x22, 0x61, 0x4f, 0x2f, 0x25, 0xa2, 0xf4, 0x15, 0xff, 0xc6, 0x93, 0x63, 0x28, 0xa5, 0xa6, 0xff,
-	0x4b, 0x01, 0xef, 0xce, 0xbc, 0x6c, 0xa1, 0x99, 0x98, 0xba, 0x91, 0x9a, 0x7a, 0xb5, 0x58, 0xe0,
-	0xcd, 0xbc, 0x14, 0x9a, 0x1f, 0x3e, 0x7f, 0x51, 0x9d, 0xfb, 0xf6, 0x45, 0x75, 0xee, 0xbb, 0x17,
-	0xd5, 0xb9, 0x3f, 0x0f, 0xab, 0xca, 0xf3, 0x61, 0x55, 0xf9, 0x76, 0x58, 0x55, 0xbe, 0x1b, 0x56,
-	0x95, 0xff, 0x0e, 0xab, 0xca, 0xdf, 0xfe, 0x57, 0x9d, 0xfb, 0xdd, 0x92, 0x94, 0xfb, 0x3e, 0x00,
-	0x00, 0xff, 0xff, 0x48, 0x23, 0x7b, 0x0e, 0x44, 0x18, 0x00, 0x00,
+	0x50, 0x04, 0xb9, 0x0c, 0x7a, 0x41, 0xd4, 0xec, 0x5b, 0xf8, 0xaa, 0xd8, 0xe1, 0xec, 0x92, 0xfb,
+	0x47, 0x5a, 0x01, 0xec, 0x3b, 0xee, 0x9c, 0xef, 0xfb, 0xce, 0xcc, 0x99, 0x33, 0x67, 0x0e, 0x07,
+	0x98, 0x17, 0x0f, 0x98, 0x61, 0x7b, 0xb5, 0x8b, 0xa0, 0x45, 0xa8, 0x4b, 0x38, 0x61, 0xb5, 0x1e,
+	0x71, 0x2d, 0x8f, 0xd6, 0xa4, 0x01, 0xfb, 0x76, 0xcd, 0xf7, 0x1c, 0xbb, 0xdd, 0xaf, 0xf5, 0xee,
+	0xb4, 0x08, 0xc7, 0x77, 0x6a, 0x1d, 0xe2, 0x12, 0x8a, 0x39, 0xb1, 0x0c, 0x9f, 0x7a, 0xdc, 0x83,
+	0x37, 0x47, 0x50, 0x03, 0xfb, 0xb6, 0x31, 0x82, 0x1a, 0x12, 0xba, 0xfb, 0x51, 0xc7, 0xe6, 0xe7,
+	0x41, 0xcb, 0x68, 0x7b, 0xdd, 0x5a, 0xc7, 0xeb, 0x78, 0x35, 0xc1, 0x68, 0x05, 0x67, 0xe2, 0x4b,
+	0x7c, 0x88, 0x5f, 0x23, 0xa5, 0x5d, 0x7d, 0xc2, 0x69, 0xdb, 0xa3, 0xa4, 0xd6, 0xcb, 0x78, 0xdb,
+	0xbd, 0x37, 0xc6, 0x74, 0x71, 0xfb, 0xdc, 0x76, 0x09, 0xed, 0xd7, 0xfc, 0x8b, 0x4e, 0x38, 0xc0,
+	0x6a, 0x5d, 0xc2, 0x71, 0x1e, 0xab, 0x56, 0xc4, 0xa2, 0x81, 0xcb, 0xed, 0x2e, 0xc9, 0x10, 0xee,
+	0xcf, 0x22, 0xb0, 0xf6, 0x39, 0xe9, 0xe2, 0x0c, 0xef, 0x6e, 0x11, 0x2f, 0xe0, 0xb6, 0x53, 0xb3,
+	0x5d, 0xce, 0x38, 0x4d, 0x93, 0xf4, 0x7b, 0x60, 0x63, 0xdf, 0x71, 0xbc, 0xaf, 0x88, 0x75, 0xd0,
+	0x3c, 0xae, 0x53, 0xbb, 0x47, 0x28, 0xbc, 0x05, 0xe6, 0x5d, 0xdc, 0x25, 0xaa, 0x72, 0x4b, 0xb9,
+	0xbd, 0x6c, 0xae, 0xbe, 0x18, 0x68, 0x73, 0xc3, 0x81, 0x36, 0xff, 0x04, 0x77, 0x09, 0x12, 0x16,
+	0xfd, 0x53, 0x50, 0x91, 0xac, 0x23, 0x87, 0x5c, 0x7e, 0xe1, 0x39, 0x41, 0x97, 0xc0, 0x1f, 0x83,
+	0x45, 0x4b, 0x08, 0x48, 0xe2, 0xba, 0x24, 0x2e, 0x8e, 0x64, 0x91, 0xb4, 0xea, 0x0c, 0x5c, 0x97,
+	0xe4, 0x47, 0x1e, 0xe3, 0x0d, 0xcc, 0xcf, 0xe1, 0x1e, 0x00, 0x3e, 0xe6, 0xe7, 0x0d, 0x4a, 0xce,
+	0xec, 0x4b, 0x49, 0x87, 0x92, 0x0e, 0x1a, 0xb1, 0x05, 0x4d, 0xa0, 0xe0, 0x87, 0xa0, 0x4c, 0x09,
+	0xb6, 0x4e, 0x5d, 0xa7, 0xaf, 0x5e, 0xbb, 0xa5, 0xdc, 0x2e, 0x9b, 0x1b, 0x92, 0x51, 0x46, 0x72,
+	0x1c, 0xc5, 0x08, 0xfd, 0x3f, 0x0a, 0x28, 0x1f, 0xf6, 0xec, 0x36, 0xb7, 0x3d, 0x17, 0xfe, 0x11,
+	0x94, 0xc3, 0xdd, 0xb2, 0x30, 0xc7, 0xc2, 0xd9, 0xca, 0xde, 0xc7, 0xc6, 0x38, 0x93, 0xe2, 0xe0,
+	0x19, 0xfe, 0x45, 0x27, 0x1c, 0x60, 0x46, 0x88, 0x36, 0x7a, 0x77, 0x8c, 0xd3, 0xd6, 0x97, 0xa4,
+	0xcd, 0x4f, 0x08, 0xc7, 0xe3, 0xe9, 0x8d, 0xc7, 0x50, 0xac, 0x0a, 0x1d, 0xb0, 0x66, 0x11, 0x87,
+	0x70, 0x72, 0xea, 0x87, 0x1e, 0x99, 0x98, 0xe1, 0xca, 0xde, 0xdd, 0xd7, 0x73, 0x53, 0x9f, 0xa4,
+	0x9a, 0x95, 0xe1, 0x40, 0x5b, 0x4b, 0x0c, 0xa1, 0xa4, 0xb8, 0xfe, 0xb5, 0x02, 0x76, 0x8e, 0x9a,
+	0x0f, 0xa9, 0x17, 0xf8, 0x4d, 0x1e, 0xee, 0x6e, 0xa7, 0x2f, 0x4d, 0xf0, 0x67, 0x60, 0x9e, 0x06,
+	0x4e, 0xb4, 0x97, 0xef, 0x47, 0x7b, 0x89, 0x02, 0x87, 0xbc, 0x1a, 0x68, 0x9b, 0x29, 0xd6, 0xd3,
+	0xbe, 0x4f, 0x90, 0x20, 0xc0, 0xcf, 0xc1, 0x22, 0xc5, 0x6e, 0x87, 0x84, 0x53, 0x2f, 0xdd, 0x5e,
+	0xd9, 0xd3, 0x8d, 0xc2, 0xb3, 0x66, 0x1c, 0xd7, 0x51, 0x08, 0x1d, 0xef, 0xb8, 0xf8, 0x64, 0x48,
+	0x2a, 0xe8, 0x27, 0x60, 0x4d, 0x6c, 0xb5, 0x47, 0xb9, 0xb0, 0xc0, 0x77, 0x41, 0xa9, 0x6b, 0xbb,
+	0x62, 0x52, 0x0b, 0xe6, 0x8a, 0x64, 0x95, 0x4e, 0x6c, 0x17, 0x85, 0xe3, 0xc2, 0x8c, 0x2f, 0x45,
+	0xcc, 0x26, 0xcd, 0xf8, 0x12, 0x85, 0xe3, 0xfa, 0x43, 0xb0, 0x24, 0x3d, 0x4e, 0x0a, 0x95, 0xa6,
+	0x0b, 0x95, 0x72, 0x84, 0xfe, 0x71, 0x0d, 0x6c, 0x36, 0x3c, 0xab, 0x6e, 0x33, 0x1a, 0x88, 0x78,
+	0x99, 0x81, 0xd5, 0x21, 0xfc, 0x2d, 0xe4, 0xc7, 0x53, 0x30, 0xcf, 0x7c, 0xd2, 0x96, 0x69, 0xb1,
+	0x37, 0x25, 0xb6, 0x39, 0xf3, 0x6b, 0xfa, 0xa4, 0x3d, 0x3e, 0x96, 0xe1, 0x17, 0x12, 0x6a, 0xf0,
+	0x39, 0x58, 0x64, 0x1c, 0xf3, 0x80, 0xa9, 0x25, 0xa1, 0x7b, 0xef, 0x8a, 0xba, 0x82, 0x3b, 0xde,
+	0xc5, 0xd1, 0x37, 0x92, 0x9a, 0xfa, 0xbf, 0x15, 0x70, 0x23, 0x87, 0xf5, 0xd8, 0x66, 0x1c, 0x3e,
+	0xcf, 0x44, 0xcc, 0x78, 0xbd, 0x88, 0x85, 0x6c, 0x11, 0xaf, 0xf8, 0xf0, 0x46, 0x23, 0x13, 0xd1,
+	0x6a, 0x82, 0x05, 0x9b, 0x93, 0x6e, 0x94, 0x8a, 0xc6, 0xd5, 0x96, 0x65, 0xae, 0x49, 0xe9, 0x85,
+	0xe3, 0x50, 0x04, 0x8d, 0xb4, 0xf4, 0x6f, 0xaf, 0xe5, 0x2e, 0x27, 0x0c, 0x27, 0x3c, 0x03, 0xab,
+	0x5d, 0xdb, 0xdd, 0xef, 0x61, 0xdb, 0xc1, 0x2d, 0x79, 0x7a, 0xa6, 0x25, 0x41, 0x58, 0x61, 0x8d,
+	0x51, 0x85, 0x35, 0x8e, 0x5d, 0x7e, 0x4a, 0x9b, 0x9c, 0xda, 0x6e, 0xc7, 0xdc, 0x18, 0x0e, 0xb4,
+	0xd5, 0x93, 0x09, 0x25, 0x94, 0xd0, 0x85, 0xbf, 0x07, 0x65, 0x46, 0x1c, 0xd2, 0xe6, 0x1e, 0xbd,
+	0x5a, 0x85, 0x78, 0x8c, 0x5b, 0xc4, 0x69, 0x4a, 0xaa, 0xb9, 0x1a, 0xc6, 0x2d, 0xfa, 0x42, 0xb1,
+	0x24, 0x74, 0xc0, 0x7a, 0x17, 0x5f, 0x3e, 0x73, 0x71, 0xbc, 0x90, 0xd2, 0x0f, 0x5c, 0x08, 0x1c,
+	0x0e, 0xb4, 0xf5, 0x93, 0x84, 0x16, 0x4a, 0x69, 0xeb, 0xc3, 0x79, 0x70, 0xb3, 0x30, 0xab, 0xe0,
+	0xe7, 0x00, 0x7a, 0x2d, 0x46, 0x68, 0x8f, 0x58, 0x0f, 0x47, 0x77, 0x90, 0xed, 0x45, 0x07, 0x77,
+	0x57, 0x6e, 0x10, 0x3c, 0xcd, 0x20, 0x50, 0x0e, 0x0b, 0xfe, 0x45, 0x01, 0x6b, 0xd6, 0xc8, 0x0d,
+	0xb1, 0x1a, 0x9e, 0x15, 0x25, 0xc6, 0xc3, 0x1f, 0x92, 0xef, 0x46, 0x7d, 0x52, 0xe9, 0xd0, 0xe5,
+	0xb4, 0x6f, 0x6e, 0xcb, 0x09, 0xad, 0x25, 0x6c, 0x28, 0xe9, 0x34, 0x5c, 0x92, 0x15, 0x4b, 0x32,
+	0x79, 0xa7, 0x89, 0x10, 0x2f, 0x8c, 0x97, 0x54, 0xcf, 0x20, 0x50, 0x0e, 0x0b, 0xfe, 0x0a, 0xac,
+	0xb7, 0x03, 0x4a, 0x89, 0xcb, 0x1f, 0x11, 0xec, 0xf0, 0xf3, 0xbe, 0x3a, 0x2f, 0x74, 0x76, 0xa4,
+	0xce, 0xfa, 0x41, 0xc2, 0x8a, 0x52, 0xe8, 0x90, 0x6f, 0x11, 0x66, 0x53, 0x62, 0x45, 0xfc, 0x85,
+	0x24, 0xbf, 0x9e, 0xb0, 0xa2, 0x14, 0x1a, 0x3e, 0x00, 0xab, 0xe4, 0xd2, 0x27, 0xed, 0x28, 0xa0,
+	0x8b, 0x82, 0xbd, 0x25, 0xd9, 0xab, 0x87, 0x13, 0x36, 0x94, 0x40, 0xee, 0x3a, 0x00, 0x66, 0x23,
+	0x08, 0x37, 0x40, 0xe9, 0x82, 0xf4, 0x47, 0xd7, 0x0e, 0x0a, 0x7f, 0xc2, 0xcf, 0xc0, 0x42, 0x0f,
+	0x3b, 0x01, 0x91, 0x89, 0xfe, 0xc1, 0xeb, 0x25, 0xfa, 0x53, 0xbb, 0x4b, 0xd0, 0x88, 0xf8, 0xf3,
+	0x6b, 0x0f, 0x14, 0xfd, 0x1b, 0x05, 0x54, 0x1a, 0x9e, 0xd5, 0x24, 0xed, 0x80, 0xda, 0xbc, 0xdf,
+	0x10, 0x9b, 0xfc, 0x16, 0x0a, 0x36, 0x4a, 0x14, 0xec, 0x8f, 0xa7, 0x27, 0x5a, 0x72, 0x76, 0x45,
+	0xe5, 0x5a, 0x7f, 0xa1, 0x80, 0xed, 0x0c, 0xfa, 0x2d, 0x94, 0xd3, 0x5f, 0x27, 0xcb, 0xe9, 0x87,
+	0x57, 0x59, 0x4c, 0x41, 0x31, 0xfd, 0xa6, 0x92, 0xb3, 0x14, 0x51, 0x4a, 0xc3, 0xd6, 0x8e, 0xda,
+	0x3d, 0xdb, 0x21, 0x1d, 0x62, 0x89, 0xc5, 0x94, 0x27, 0x5a, 0xbb, 0xd8, 0x82, 0x26, 0x50, 0x90,
+	0x81, 0x1d, 0x8b, 0x9c, 0xe1, 0xc0, 0xe1, 0xfb, 0x96, 0x75, 0x80, 0x7d, 0xdc, 0xb2, 0x1d, 0x9b,
+	0xdb, 0xb2, 0x17, 0x59, 0x36, 0x3f, 0x1d, 0x0e, 0xb4, 0x9d, 0x7a, 0x2e, 0xe2, 0xd5, 0x40, 0x7b,
+	0x37, 0xdb, 0xca, 0x1b, 0x31, 0xa4, 0x8f, 0x0a, 0xa4, 0x61, 0x1f, 0xa8, 0x94, 0xfc, 0x29, 0x08,
+	0x0f, 0x45, 0x9d, 0x7a, 0x7e, 0xc2, 0x6d, 0x49, 0xb8, 0xfd, 0xe5, 0x70, 0xa0, 0xa9, 0xa8, 0x00,
+	0x33, 0xdb, 0x71, 0xa1, 0x3c, 0xfc, 0x12, 0x6c, 0x62, 0xd9, 0x84, 0x4f, 0x7a, 0x9d, 0x17, 0x5e,
+	0x1f, 0x0c, 0x07, 0xda, 0xe6, 0x7e, 0xd6, 0x3c, 0xdb, 0x61, 0x9e, 0x28, 0xac, 0x81, 0xa5, 0x9e,
+	0xe8, 0xd7, 0x99, 0xba, 0x20, 0xf4, 0xb7, 0x87, 0x03, 0x6d, 0x69, 0xd4, 0xc2, 0x87, 0x9a, 0x8b,
+	0x47, 0x4d, 0xd1, 0x05, 0x46, 0x28, 0xf8, 0x09, 0x58, 0x39, 0xf7, 0x18, 0x7f, 0x42, 0xf8, 0x57,
+	0x1e, 0xbd, 0x10, 0x85, 0xa1, 0x6c, 0x6e, 0xca, 0x1d, 0x5c, 0x79, 0x34, 0x36, 0xa1, 0x49, 0x1c,
+	0xfc, 0x2d, 0x58, 0x3e, 0x97, 0x3d, 0x1f, 0x53, 0x97, 0x44, 0xa2, 0xdd, 0x9e, 0x92, 0x68, 0x89,
+	0xfe, 0xd0, 0xac, 0x48, 0xf9, 0xe5, 0x68, 0x98, 0xa1, 0xb1, 0x1a, 0xfc, 0x09, 0x58, 0x12, 0x1f,
+	0xc7, 0x75, 0xb5, 0x2c, 0x66, 0x73, 0x5d, 0xc2, 0x97, 0x1e, 0x8d, 0x86, 0x51, 0x64, 0x8f, 0xa0,
+	0xc7, 0x8d, 0x03, 0x75, 0x39, 0x0b, 0x3d, 0x6e, 0x1c, 0xa0, 0xc8, 0x0e, 0x9f, 0x83, 0x25, 0x46,
+	0x1e, 0xdb, 0x6e, 0x70, 0xa9, 0x02, 0x71, 0xe4, 0xee, 0x4c, 0x99, 0x6e, 0xf3, 0x50, 0x20, 0x53,
+	0xdd, 0xf6, 0x58, 0x5d, 0xda, 0x51, 0x24, 0x09, 0x2d, 0xb0, 0x4c, 0x03, 0x77, 0x9f, 0x3d, 0x63,
+	0x84, 0xaa, 0x2b, 0x99, 0xab, 0x3e, 0xad, 0x8f, 0x22, 0x6c, 0xda, 0x43, 0x1c, 0x99, 0x18, 0x81,
+	0xc6, 0xc2, 0xd0, 0x02, 0x40, 0x7c, 0x88, 0xa6, 0x5e, 0xdd, 0x99, 0xd9, 0x04, 0xa2, 0x18, 0x9c,
+	0xf6, 0xb3, 0x1e, 0x1e, 0xcf, 0xb1, 0x19, 0x4d, 0xe8, 0xc2, 0xbf, 0x2a, 0x00, 0xb2, 0xc0, 0xf7,
+	0x1d, 0xd2, 0x25, 0x2e, 0xc7, 0x8e, 0x18, 0x65, 0xea, 0xaa, 0x70, 0xf7, 0x8b, 0x69, 0x51, 0xcb,
+	0x90, 0xd2, 0x6e, 0xe3, 0x6b, 0x33, 0x0b, 0x45, 0x39, 0x3e, 0xc3, 0x4d, 0x3b, 0x93, 0xab, 0x5d,
+	0x9b, 0xb9, 0x69, 0xf9, 0x7f, 0x91, 0xc6, 0x9b, 0x26, 0xed, 0x28, 0x92, 0x84, 0x5f, 0x80, 0x9d,
+	0xe8, 0x0f, 0x24, 0xf2, 0x3c, 0x7e, 0x64, 0x3b, 0x84, 0xf5, 0x19, 0x27, 0x5d, 0x75, 0x5d, 0x24,
+	0x53, 0x55, 0x32, 0x77, 0x50, 0x2e, 0x0a, 0x15, 0xb0, 0x61, 0x17, 0x68, 0x51, 0x11, 0x0a, 0x4f,
+	0x68, 0x5c, 0x05, 0x0f, 0x59, 0x1b, 0x3b, 0xa3, 0xc6, 0xe8, 0xba, 0x70, 0xf0, 0xfe, 0x70, 0xa0,
+	0x69, 0xf5, 0xe9, 0x50, 0x34, 0x4b, 0x0b, 0xfe, 0x06, 0xa8, 0xb8, 0xc8, 0xcf, 0x86, 0xf0, 0xf3,
+	0xa3, 0xb0, 0xb2, 0x15, 0x3a, 0x28, 0x64, 0x43, 0x1f, 0x6c, 0xe0, 0xe4, 0x5f, 0x79, 0xa6, 0x56,
+	0xc4, 0x59, 0xff, 0x60, 0xca, 0x3e, 0xa4, 0xfe, 0xfd, 0x9b, 0xaa, 0x0c, 0xe3, 0x46, 0xca, 0xc0,
+	0x50, 0x46, 0x1d, 0x5e, 0x02, 0x88, 0xd3, 0x2f, 0x0f, 0x4c, 0x85, 0x33, 0x2f, 0xb2, 0xcc, 0x73,
+	0xc5, 0x38, 0xd5, 0x32, 0x26, 0x86, 0x72, 0x7c, 0x40, 0x0e, 0x2a, 0x38, 0xf5, 0x52, 0xc2, 0xd4,
+	0x1b, 0xc2, 0xf1, 0x4f, 0x67, 0x3b, 0x8e, 0x39, 0xe6, 0x4d, 0xe9, 0xb7, 0x92, 0xb6, 0x30, 0x94,
+	0x75, 0x00, 0x1f, 0x83, 0x2d, 0x39, 0xf8, 0xcc, 0x65, 0xf8, 0x8c, 0x34, 0xfb, 0xac, 0xcd, 0x1d,
+	0xa6, 0x6e, 0x8a, 0xda, 0xad, 0x0e, 0x07, 0xda, 0xd6, 0x7e, 0x8e, 0x1d, 0xe5, 0xb2, 0xe0, 0x67,
+	0x60, 0xe3, 0xcc, 0xa3, 0x2d, 0xdb, 0xb2, 0x88, 0x1b, 0x29, 0x6d, 0x09, 0xa5, 0xad, 0x30, 0xfe,
+	0x47, 0x29, 0x1b, 0xca, 0xa0, 0x21, 0x03, 0xdb, 0x52, 0xb9, 0x41, 0xbd, 0xf6, 0x89, 0x17, 0xb8,
+	0x3c, 0xbc, 0x2e, 0x98, 0xba, 0x1d, 0x5f, 0x91, 0xdb, 0xfb, 0x79, 0x80, 0x57, 0x03, 0xed, 0x56,
+	0xce, 0x75, 0x95, 0x00, 0xa1, 0x7c, 0x6d, 0xe8, 0x80, 0x55, 0xf9, 0xf6, 0x75, 0xe0, 0x60, 0xc6,
+	0x54, 0x55, 0x1c, 0xf5, 0xfb, 0xd3, 0x0b, 0x5b, 0x0c, 0x4f, 0x9f, 0x77, 0xf1, 0xa7, 0x6c, 0x12,
+	0x80, 0x12, 0xea, 0xfa, 0xdf, 0x15, 0x70, 0xb3, 0xb0, 0x30, 0xc2, 0xfb, 0x89, 0x07, 0x15, 0x3d,
+	0xf5, 0xa0, 0x02, 0xb3, 0xc4, 0x37, 0xf0, 0x9e, 0xf2, 0xb5, 0x02, 0xd4, 0xa2, 0x1b, 0x02, 0x7e,
+	0x92, 0x98, 0xe0, 0x7b, 0xa9, 0x09, 0x56, 0x32, 0xbc, 0x37, 0x30, 0xbf, 0x6f, 0x15, 0xf0, 0xce,
+	0x94, 0x1d, 0x88, 0x0b, 0x12, 0xb1, 0x26, 0x51, 0x4f, 0x70, 0x78, 0x94, 0x15, 0x91, 0x47, 0xe3,
+	0x82, 0x94, 0x83, 0x41, 0x85, 0x6c, 0xf8, 0x0c, 0xdc, 0x90, 0xd5, 0x30, 0x6d, 0x13, 0x9d, 0xfb,
+	0xb2, 0xf9, 0xce, 0x70, 0xa0, 0xdd, 0xa8, 0xe7, 0x43, 0x50, 0x11, 0x57, 0xff, 0xa7, 0x02, 0x76,
+	0xf2, 0xaf, 0x7c, 0x78, 0x37, 0x11, 0x6e, 0x2d, 0x15, 0xee, 0xeb, 0x29, 0x96, 0x0c, 0xf6, 0x1f,
+	0xc0, 0xba, 0x6c, 0x0c, 0x92, 0xef, 0x83, 0x89, 0xa0, 0x87, 0x47, 0x24, 0xec, 0xe9, 0xa5, 0x44,
+	0x94, 0xbe, 0xe2, 0xaf, 0x78, 0x72, 0x0c, 0xa5, 0xd4, 0xf4, 0x7f, 0x29, 0xe0, 0xbd, 0x99, 0x97,
+	0x2d, 0x34, 0x13, 0x53, 0x37, 0x52, 0x53, 0xaf, 0x16, 0x0b, 0xbc, 0x99, 0x67, 0x42, 0xf3, 0xa3,
+	0x17, 0x2f, 0xab, 0x73, 0xdf, 0xbd, 0xac, 0xce, 0x7d, 0xff, 0xb2, 0x3a, 0xf7, 0xe7, 0x61, 0x55,
+	0x79, 0x31, 0xac, 0x2a, 0xdf, 0x0d, 0xab, 0xca, 0xf7, 0xc3, 0xaa, 0xf2, 0xdf, 0x61, 0x55, 0xf9,
+	0xdb, 0xff, 0xaa, 0x73, 0xbf, 0x5b, 0x92, 0x72, 0xff, 0x0f, 0x00, 0x00, 0xff, 0xff, 0x5d, 0xe0,
+	0x55, 0x1c, 0x41, 0x18, 0x00, 0x00,
 }
 
 func (m *AllowedCSIDriver) Marshal() (dAtA []byte, err error) {
@@ -1155,7 +1155,7 @@ func (m *PodDisruptionBudgetStatus) MarshalToSizedBuffer(dAtA []byte) (int, erro
 	i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentHealthy))
 	i--
 	dAtA[i] = 0x20
-	i = encodeVarintGenerated(dAtA, i, uint64(m.PodDisruptionsAllowed))
+	i = encodeVarintGenerated(dAtA, i, uint64(m.DisruptionsAllowed))
 	i--
 	dAtA[i] = 0x18
 	if len(m.DisruptedPods) > 0 {
@@ -1940,7 +1940,7 @@ func (m *PodDisruptionBudgetStatus) Size() (n int) {
 			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
 		}
 	}
-	n += 1 + sovGenerated(uint64(m.PodDisruptionsAllowed))
+	n += 1 + sovGenerated(uint64(m.DisruptionsAllowed))
 	n += 1 + sovGenerated(uint64(m.CurrentHealthy))
 	n += 1 + sovGenerated(uint64(m.DesiredHealthy))
 	n += 1 + sovGenerated(uint64(m.ExpectedPods))
@@ -2307,7 +2307,7 @@ func (this *PodDisruptionBudgetStatus) String() string {
 	s := strings.Join([]string{`&PodDisruptionBudgetStatus{`,
 		`ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`,
 		`DisruptedPods:` + mapStringForDisruptedPods + `,`,
-		`PodDisruptionsAllowed:` + fmt.Sprintf("%v", this.PodDisruptionsAllowed) + `,`,
+		`DisruptionsAllowed:` + fmt.Sprintf("%v", this.DisruptionsAllowed) + `,`,
 		`CurrentHealthy:` + fmt.Sprintf("%v", this.CurrentHealthy) + `,`,
 		`DesiredHealthy:` + fmt.Sprintf("%v", this.DesiredHealthy) + `,`,
 		`ExpectedPods:` + fmt.Sprintf("%v", this.ExpectedPods) + `,`,
@@ -3783,9 +3783,9 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error {
 			iNdEx = postIndex
 		case 3:
 			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field PodDisruptionsAllowed", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field DisruptionsAllowed", wireType)
 			}
-			m.PodDisruptionsAllowed = 0
+			m.DisruptionsAllowed = 0
 			for shift := uint(0); ; shift += 7 {
 				if shift >= 64 {
 					return ErrIntOverflowGenerated
@@ -3795,7 +3795,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error {
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				m.PodDisruptionsAllowed |= int32(b&0x7F) << shift
+				m.DisruptionsAllowed |= int32(b&0x7F) << shift
 				if b < 0x80 {
 					break
 				}
@@ -5478,6 +5478,7 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error {
 func skipGenerated(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
+	depth := 0
 	for iNdEx < l {
 		var wire uint64
 		for shift := uint(0); ; shift += 7 {
@@ -5509,10 +5510,8 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			return iNdEx, nil
 		case 1:
 			iNdEx += 8
-			return iNdEx, nil
 		case 2:
 			var length int
 			for shift := uint(0); ; shift += 7 {
@@ -5533,55 +5532,30 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 				return 0, ErrInvalidLengthGenerated
 			}
 			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthGenerated
-			}
-			return iNdEx, nil
 		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowGenerated
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipGenerated(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthGenerated
-				}
-			}
-			return iNdEx, nil
+			depth++
 		case 4:
-			return iNdEx, nil
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
 		case 5:
 			iNdEx += 4
-			return iNdEx, nil
 		default:
 			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
 		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
 	}
-	panic("unreachable")
+	return 0, io.ErrUnexpectedEOF
 }
 
 var (
-	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
 )
diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.proto b/vendor/k8s.io/api/policy/v1beta1/generated.proto
index 9679dafc..d0448374 100644
--- a/vendor/k8s.io/api/policy/v1beta1/generated.proto
+++ b/vendor/k8s.io/api/policy/v1beta1/generated.proto
@@ -150,7 +150,7 @@ message PodDisruptionBudgetSpec {
 // PodDisruptionBudgetStatus represents information about the status of a
 // PodDisruptionBudget. Status may trail the actual state of a system.
 message PodDisruptionBudgetStatus {
-  // Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other
+  // Most recent generation observed when updating this PDB status. DisruptionsAllowed and other
   // status information is valid only if observedGeneration equals to PDB's object generation.
   // +optional
   optional int64 observedGeneration = 1;
diff --git a/vendor/k8s.io/api/policy/v1beta1/types.go b/vendor/k8s.io/api/policy/v1beta1/types.go
index d8e417ab..e6a59763 100644
--- a/vendor/k8s.io/api/policy/v1beta1/types.go
+++ b/vendor/k8s.io/api/policy/v1beta1/types.go
@@ -47,7 +47,7 @@ type PodDisruptionBudgetSpec struct {
 // PodDisruptionBudgetStatus represents information about the status of a
 // PodDisruptionBudget. Status may trail the actual state of a system.
 type PodDisruptionBudgetStatus struct {
-	// Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other
+	// Most recent generation observed when updating this PDB status. DisruptionsAllowed and other
 	// status information is valid only if observedGeneration equals to PDB's object generation.
 	// +optional
 	ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"`
@@ -67,7 +67,7 @@ type PodDisruptionBudgetStatus struct {
 	DisruptedPods map[string]metav1.Time `json:"disruptedPods,omitempty" protobuf:"bytes,2,rep,name=disruptedPods"`
 
 	// Number of pod disruptions that are currently allowed.
-	PodDisruptionsAllowed int32 `json:"disruptionsAllowed" protobuf:"varint,3,opt,name=disruptionsAllowed"`
+	DisruptionsAllowed int32 `json:"disruptionsAllowed" protobuf:"varint,3,opt,name=disruptionsAllowed"`
 
 	// current number of healthy pods
 	CurrentHealthy int32 `json:"currentHealthy" protobuf:"varint,4,opt,name=currentHealthy"`
diff --git a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go
index 40a951c4..70f667c6 100644
--- a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go
@@ -126,7 +126,7 @@ func (PodDisruptionBudgetSpec) SwaggerDoc() map[string]string {
 
 var map_PodDisruptionBudgetStatus = map[string]string{
 	"":                   "PodDisruptionBudgetStatus represents information about the status of a PodDisruptionBudget. Status may trail the actual state of a system.",
-	"observedGeneration": "Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other status information is valid only if observedGeneration equals to PDB's object generation.",
+	"observedGeneration": "Most recent generation observed when updating this PDB status. DisruptionsAllowed and other status information is valid only if observedGeneration equals to PDB's object generation.",
 	"disruptedPods":      "DisruptedPods contains information about pods whose eviction was processed by the API server eviction subresource handler but has not yet been observed by the PodDisruptionBudget controller. A pod will be in this map from the time when the API server processed the eviction request to the time when the pod is seen by PDB controller as having been marked for deletion (or after a timeout). The key in the map is the name of the pod and the value is the time when the API server processed the eviction request. If the deletion didn't occur and a pod is still there it will be removed from the list automatically by PodDisruptionBudget controller after some time. If everything goes smooth this map should be empty for the most of the time. Large number of entries in the map may indicate problems with pod deletions.",
 	"disruptionsAllowed": "Number of pod disruptions that are currently allowed.",
 	"currentHealthy":     "current number of healthy pods",
diff --git a/vendor/k8s.io/api/rbac/v1/generated.pb.go b/vendor/k8s.io/api/rbac/v1/generated.pb.go
index 9bb48fc3..ba6872d6 100644
--- a/vendor/k8s.io/api/rbac/v1/generated.pb.go
+++ b/vendor/k8s.io/api/rbac/v1/generated.pb.go
@@ -42,7 +42,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 func (m *AggregationRule) Reset()      { *m = AggregationRule{} }
 func (*AggregationRule) ProtoMessage() {}
@@ -3183,6 +3183,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error {
 func skipGenerated(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
+	depth := 0
 	for iNdEx < l {
 		var wire uint64
 		for shift := uint(0); ; shift += 7 {
@@ -3214,10 +3215,8 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			return iNdEx, nil
 		case 1:
 			iNdEx += 8
-			return iNdEx, nil
 		case 2:
 			var length int
 			for shift := uint(0); ; shift += 7 {
@@ -3238,55 +3237,30 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 				return 0, ErrInvalidLengthGenerated
 			}
 			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthGenerated
-			}
-			return iNdEx, nil
 		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowGenerated
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipGenerated(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthGenerated
-				}
-			}
-			return iNdEx, nil
+			depth++
 		case 4:
-			return iNdEx, nil
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
 		case 5:
 			iNdEx += 4
-			return iNdEx, nil
 		default:
 			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
 		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
 	}
-	panic("unreachable")
+	return 0, io.ErrUnexpectedEOF
 }
 
 var (
-	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
 )
diff --git a/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go b/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go
index 19336247..3b12526d 100644
--- a/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go
+++ b/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go
@@ -42,7 +42,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 func (m *AggregationRule) Reset()      { *m = AggregationRule{} }
 func (*AggregationRule) ProtoMessage() {}
@@ -3184,6 +3184,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error {
 func skipGenerated(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
+	depth := 0
 	for iNdEx < l {
 		var wire uint64
 		for shift := uint(0); ; shift += 7 {
@@ -3215,10 +3216,8 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			return iNdEx, nil
 		case 1:
 			iNdEx += 8
-			return iNdEx, nil
 		case 2:
 			var length int
 			for shift := uint(0); ; shift += 7 {
@@ -3239,55 +3238,30 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 				return 0, ErrInvalidLengthGenerated
 			}
 			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthGenerated
-			}
-			return iNdEx, nil
 		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowGenerated
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipGenerated(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthGenerated
-				}
-			}
-			return iNdEx, nil
+			depth++
 		case 4:
-			return iNdEx, nil
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
 		case 5:
 			iNdEx += 4
-			return iNdEx, nil
 		default:
 			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
 		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
 	}
-	panic("unreachable")
+	return 0, io.ErrUnexpectedEOF
 }
 
 var (
-	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
 )
diff --git a/vendor/k8s.io/api/rbac/v1alpha1/generated.proto b/vendor/k8s.io/api/rbac/v1alpha1/generated.proto
index 5c50a87e..895ab623 100644
--- a/vendor/k8s.io/api/rbac/v1alpha1/generated.proto
+++ b/vendor/k8s.io/api/rbac/v1alpha1/generated.proto
@@ -113,7 +113,6 @@ message PolicyRule {
   repeated string resourceNames = 5;
 
   // NonResourceURLs is a set of partial urls that a user should have access to.  *s are allowed, but only as the full, final step in the path
-  // This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different.
   // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding.
   // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"),  but not both.
   // +optional
diff --git a/vendor/k8s.io/api/rbac/v1alpha1/types.go b/vendor/k8s.io/api/rbac/v1alpha1/types.go
index a5d3e38f..ba91ab32 100644
--- a/vendor/k8s.io/api/rbac/v1alpha1/types.go
+++ b/vendor/k8s.io/api/rbac/v1alpha1/types.go
@@ -62,7 +62,6 @@ type PolicyRule struct {
 	ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,5,rep,name=resourceNames"`
 
 	// NonResourceURLs is a set of partial urls that a user should have access to.  *s are allowed, but only as the full, final step in the path
-	// This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different.
 	// Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding.
 	// Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"),  but not both.
 	// +optional
diff --git a/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go
index 8238de21..eab08c5d 100644
--- a/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go
@@ -84,7 +84,7 @@ var map_PolicyRule = map[string]string{
 	"apiGroups":       "APIGroups is the name of the APIGroup that contains the resources.  If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.",
 	"resources":       "Resources is a list of resources this rule applies to.  ResourceAll represents all resources.",
 	"resourceNames":   "ResourceNames is an optional white list of names that the rule applies to.  An empty set means that everything is allowed.",
-	"nonResourceURLs": "NonResourceURLs is a set of partial urls that a user should have access to.  *s are allowed, but only as the full, final step in the path This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different. Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"),  but not both.",
+	"nonResourceURLs": "NonResourceURLs is a set of partial urls that a user should have access to.  *s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"),  but not both.",
 }
 
 func (PolicyRule) SwaggerDoc() map[string]string {
diff --git a/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go b/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go
index 6c80f52f..53d36320 100644
--- a/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go
+++ b/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go
@@ -42,7 +42,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 func (m *AggregationRule) Reset()      { *m = AggregationRule{} }
 func (*AggregationRule) ProtoMessage() {}
@@ -3183,6 +3183,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error {
 func skipGenerated(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
+	depth := 0
 	for iNdEx < l {
 		var wire uint64
 		for shift := uint(0); ; shift += 7 {
@@ -3214,10 +3215,8 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			return iNdEx, nil
 		case 1:
 			iNdEx += 8
-			return iNdEx, nil
 		case 2:
 			var length int
 			for shift := uint(0); ; shift += 7 {
@@ -3238,55 +3237,30 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 				return 0, ErrInvalidLengthGenerated
 			}
 			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthGenerated
-			}
-			return iNdEx, nil
 		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowGenerated
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipGenerated(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthGenerated
-				}
-			}
-			return iNdEx, nil
+			depth++
 		case 4:
-			return iNdEx, nil
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
 		case 5:
 			iNdEx += 4
-			return iNdEx, nil
 		default:
 			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
 		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
 	}
-	panic("unreachable")
+	return 0, io.ErrUnexpectedEOF
 }
 
 var (
-	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
 )
diff --git a/vendor/k8s.io/api/scheduling/v1/generated.pb.go b/vendor/k8s.io/api/scheduling/v1/generated.pb.go
index 7e9764f1..efc3102e 100644
--- a/vendor/k8s.io/api/scheduling/v1/generated.pb.go
+++ b/vendor/k8s.io/api/scheduling/v1/generated.pb.go
@@ -43,7 +43,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 func (m *PriorityClass) Reset()      { *m = PriorityClass{} }
 func (*PriorityClass) ProtoMessage() {}
@@ -652,6 +652,7 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error {
 func skipGenerated(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
+	depth := 0
 	for iNdEx < l {
 		var wire uint64
 		for shift := uint(0); ; shift += 7 {
@@ -683,10 +684,8 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			return iNdEx, nil
 		case 1:
 			iNdEx += 8
-			return iNdEx, nil
 		case 2:
 			var length int
 			for shift := uint(0); ; shift += 7 {
@@ -707,55 +706,30 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 				return 0, ErrInvalidLengthGenerated
 			}
 			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthGenerated
-			}
-			return iNdEx, nil
 		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowGenerated
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipGenerated(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthGenerated
-				}
-			}
-			return iNdEx, nil
+			depth++
 		case 4:
-			return iNdEx, nil
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
 		case 5:
 			iNdEx += 4
-			return iNdEx, nil
 		default:
 			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
 		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
 	}
-	panic("unreachable")
+	return 0, io.ErrUnexpectedEOF
 }
 
 var (
-	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
 )
diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go b/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go
index 05bff0ff..8a62104d 100644
--- a/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go
+++ b/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go
@@ -43,7 +43,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 func (m *PriorityClass) Reset()      { *m = PriorityClass{} }
 func (*PriorityClass) ProtoMessage() {}
@@ -652,6 +652,7 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error {
 func skipGenerated(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
+	depth := 0
 	for iNdEx < l {
 		var wire uint64
 		for shift := uint(0); ; shift += 7 {
@@ -683,10 +684,8 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			return iNdEx, nil
 		case 1:
 			iNdEx += 8
-			return iNdEx, nil
 		case 2:
 			var length int
 			for shift := uint(0); ; shift += 7 {
@@ -707,55 +706,30 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 				return 0, ErrInvalidLengthGenerated
 			}
 			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthGenerated
-			}
-			return iNdEx, nil
 		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowGenerated
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipGenerated(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthGenerated
-				}
-			}
-			return iNdEx, nil
+			depth++
 		case 4:
-			return iNdEx, nil
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
 		case 5:
 			iNdEx += 4
-			return iNdEx, nil
 		default:
 			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
 		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
 	}
-	panic("unreachable")
+	return 0, io.ErrUnexpectedEOF
 }
 
 var (
-	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
 )
diff --git a/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go b/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go
index 198fcd02..b89af56b 100644
--- a/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go
+++ b/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go
@@ -43,7 +43,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 func (m *PriorityClass) Reset()      { *m = PriorityClass{} }
 func (*PriorityClass) ProtoMessage() {}
@@ -652,6 +652,7 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error {
 func skipGenerated(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
+	depth := 0
 	for iNdEx < l {
 		var wire uint64
 		for shift := uint(0); ; shift += 7 {
@@ -683,10 +684,8 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			return iNdEx, nil
 		case 1:
 			iNdEx += 8
-			return iNdEx, nil
 		case 2:
 			var length int
 			for shift := uint(0); ; shift += 7 {
@@ -707,55 +706,30 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 				return 0, ErrInvalidLengthGenerated
 			}
 			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthGenerated
-			}
-			return iNdEx, nil
 		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowGenerated
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipGenerated(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthGenerated
-				}
-			}
-			return iNdEx, nil
+			depth++
 		case 4:
-			return iNdEx, nil
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
 		case 5:
 			iNdEx += 4
-			return iNdEx, nil
 		default:
 			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
 		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
 	}
-	panic("unreachable")
+	return 0, io.ErrUnexpectedEOF
 }
 
 var (
-	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
 )
diff --git a/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go b/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go
index 7469f74a..7ed066d3 100644
--- a/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go
+++ b/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go
@@ -42,7 +42,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 func (m *PodPreset) Reset()      { *m = PodPreset{} }
 func (*PodPreset) ProtoMessage() {}
@@ -970,6 +970,7 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error {
 func skipGenerated(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
+	depth := 0
 	for iNdEx < l {
 		var wire uint64
 		for shift := uint(0); ; shift += 7 {
@@ -1001,10 +1002,8 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			return iNdEx, nil
 		case 1:
 			iNdEx += 8
-			return iNdEx, nil
 		case 2:
 			var length int
 			for shift := uint(0); ; shift += 7 {
@@ -1025,55 +1024,30 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 				return 0, ErrInvalidLengthGenerated
 			}
 			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthGenerated
-			}
-			return iNdEx, nil
 		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowGenerated
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipGenerated(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthGenerated
-				}
-			}
-			return iNdEx, nil
+			depth++
 		case 4:
-			return iNdEx, nil
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
 		case 5:
 			iNdEx += 4
-			return iNdEx, nil
 		default:
 			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
 		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
 	}
-	panic("unreachable")
+	return 0, io.ErrUnexpectedEOF
 }
 
 var (
-	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
 )
diff --git a/vendor/k8s.io/api/storage/v1/generated.pb.go b/vendor/k8s.io/api/storage/v1/generated.pb.go
index 3d09ee7e..9e573691 100644
--- a/vendor/k8s.io/api/storage/v1/generated.pb.go
+++ b/vendor/k8s.io/api/storage/v1/generated.pb.go
@@ -44,12 +44,96 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+func (m *CSIDriver) Reset()      { *m = CSIDriver{} }
+func (*CSIDriver) ProtoMessage() {}
+func (*CSIDriver) Descriptor() ([]byte, []int) {
+	return fileDescriptor_3b530c1983504d8d, []int{0}
+}
+func (m *CSIDriver) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *CSIDriver) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *CSIDriver) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CSIDriver.Merge(m, src)
+}
+func (m *CSIDriver) XXX_Size() int {
+	return m.Size()
+}
+func (m *CSIDriver) XXX_DiscardUnknown() {
+	xxx_messageInfo_CSIDriver.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CSIDriver proto.InternalMessageInfo
+
+func (m *CSIDriverList) Reset()      { *m = CSIDriverList{} }
+func (*CSIDriverList) ProtoMessage() {}
+func (*CSIDriverList) Descriptor() ([]byte, []int) {
+	return fileDescriptor_3b530c1983504d8d, []int{1}
+}
+func (m *CSIDriverList) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *CSIDriverList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *CSIDriverList) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CSIDriverList.Merge(m, src)
+}
+func (m *CSIDriverList) XXX_Size() int {
+	return m.Size()
+}
+func (m *CSIDriverList) XXX_DiscardUnknown() {
+	xxx_messageInfo_CSIDriverList.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CSIDriverList proto.InternalMessageInfo
+
+func (m *CSIDriverSpec) Reset()      { *m = CSIDriverSpec{} }
+func (*CSIDriverSpec) ProtoMessage() {}
+func (*CSIDriverSpec) Descriptor() ([]byte, []int) {
+	return fileDescriptor_3b530c1983504d8d, []int{2}
+}
+func (m *CSIDriverSpec) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *CSIDriverSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *CSIDriverSpec) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CSIDriverSpec.Merge(m, src)
+}
+func (m *CSIDriverSpec) XXX_Size() int {
+	return m.Size()
+}
+func (m *CSIDriverSpec) XXX_DiscardUnknown() {
+	xxx_messageInfo_CSIDriverSpec.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CSIDriverSpec proto.InternalMessageInfo
 
 func (m *CSINode) Reset()      { *m = CSINode{} }
 func (*CSINode) ProtoMessage() {}
 func (*CSINode) Descriptor() ([]byte, []int) {
-	return fileDescriptor_3b530c1983504d8d, []int{0}
+	return fileDescriptor_3b530c1983504d8d, []int{3}
 }
 func (m *CSINode) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -77,7 +161,7 @@ var xxx_messageInfo_CSINode proto.InternalMessageInfo
 func (m *CSINodeDriver) Reset()      { *m = CSINodeDriver{} }
 func (*CSINodeDriver) ProtoMessage() {}
 func (*CSINodeDriver) Descriptor() ([]byte, []int) {
-	return fileDescriptor_3b530c1983504d8d, []int{1}
+	return fileDescriptor_3b530c1983504d8d, []int{4}
 }
 func (m *CSINodeDriver) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -105,7 +189,7 @@ var xxx_messageInfo_CSINodeDriver proto.InternalMessageInfo
 func (m *CSINodeList) Reset()      { *m = CSINodeList{} }
 func (*CSINodeList) ProtoMessage() {}
 func (*CSINodeList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_3b530c1983504d8d, []int{2}
+	return fileDescriptor_3b530c1983504d8d, []int{5}
 }
 func (m *CSINodeList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -133,7 +217,7 @@ var xxx_messageInfo_CSINodeList proto.InternalMessageInfo
 func (m *CSINodeSpec) Reset()      { *m = CSINodeSpec{} }
 func (*CSINodeSpec) ProtoMessage() {}
 func (*CSINodeSpec) Descriptor() ([]byte, []int) {
-	return fileDescriptor_3b530c1983504d8d, []int{3}
+	return fileDescriptor_3b530c1983504d8d, []int{6}
 }
 func (m *CSINodeSpec) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -161,7 +245,7 @@ var xxx_messageInfo_CSINodeSpec proto.InternalMessageInfo
 func (m *StorageClass) Reset()      { *m = StorageClass{} }
 func (*StorageClass) ProtoMessage() {}
 func (*StorageClass) Descriptor() ([]byte, []int) {
-	return fileDescriptor_3b530c1983504d8d, []int{4}
+	return fileDescriptor_3b530c1983504d8d, []int{7}
 }
 func (m *StorageClass) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -189,7 +273,7 @@ var xxx_messageInfo_StorageClass proto.InternalMessageInfo
 func (m *StorageClassList) Reset()      { *m = StorageClassList{} }
 func (*StorageClassList) ProtoMessage() {}
 func (*StorageClassList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_3b530c1983504d8d, []int{5}
+	return fileDescriptor_3b530c1983504d8d, []int{8}
 }
 func (m *StorageClassList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -217,7 +301,7 @@ var xxx_messageInfo_StorageClassList proto.InternalMessageInfo
 func (m *VolumeAttachment) Reset()      { *m = VolumeAttachment{} }
 func (*VolumeAttachment) ProtoMessage() {}
 func (*VolumeAttachment) Descriptor() ([]byte, []int) {
-	return fileDescriptor_3b530c1983504d8d, []int{6}
+	return fileDescriptor_3b530c1983504d8d, []int{9}
 }
 func (m *VolumeAttachment) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -245,7 +329,7 @@ var xxx_messageInfo_VolumeAttachment proto.InternalMessageInfo
 func (m *VolumeAttachmentList) Reset()      { *m = VolumeAttachmentList{} }
 func (*VolumeAttachmentList) ProtoMessage() {}
 func (*VolumeAttachmentList) Descriptor() ([]byte, []int) {
-	return fileDescriptor_3b530c1983504d8d, []int{7}
+	return fileDescriptor_3b530c1983504d8d, []int{10}
 }
 func (m *VolumeAttachmentList) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -273,7 +357,7 @@ var xxx_messageInfo_VolumeAttachmentList proto.InternalMessageInfo
 func (m *VolumeAttachmentSource) Reset()      { *m = VolumeAttachmentSource{} }
 func (*VolumeAttachmentSource) ProtoMessage() {}
 func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) {
-	return fileDescriptor_3b530c1983504d8d, []int{8}
+	return fileDescriptor_3b530c1983504d8d, []int{11}
 }
 func (m *VolumeAttachmentSource) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -301,7 +385,7 @@ var xxx_messageInfo_VolumeAttachmentSource proto.InternalMessageInfo
 func (m *VolumeAttachmentSpec) Reset()      { *m = VolumeAttachmentSpec{} }
 func (*VolumeAttachmentSpec) ProtoMessage() {}
 func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) {
-	return fileDescriptor_3b530c1983504d8d, []int{9}
+	return fileDescriptor_3b530c1983504d8d, []int{12}
 }
 func (m *VolumeAttachmentSpec) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -329,7 +413,7 @@ var xxx_messageInfo_VolumeAttachmentSpec proto.InternalMessageInfo
 func (m *VolumeAttachmentStatus) Reset()      { *m = VolumeAttachmentStatus{} }
 func (*VolumeAttachmentStatus) ProtoMessage() {}
 func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_3b530c1983504d8d, []int{10}
+	return fileDescriptor_3b530c1983504d8d, []int{13}
 }
 func (m *VolumeAttachmentStatus) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -357,7 +441,7 @@ var xxx_messageInfo_VolumeAttachmentStatus proto.InternalMessageInfo
 func (m *VolumeError) Reset()      { *m = VolumeError{} }
 func (*VolumeError) ProtoMessage() {}
 func (*VolumeError) Descriptor() ([]byte, []int) {
-	return fileDescriptor_3b530c1983504d8d, []int{11}
+	return fileDescriptor_3b530c1983504d8d, []int{14}
 }
 func (m *VolumeError) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -385,7 +469,7 @@ var xxx_messageInfo_VolumeError proto.InternalMessageInfo
 func (m *VolumeNodeResources) Reset()      { *m = VolumeNodeResources{} }
 func (*VolumeNodeResources) ProtoMessage() {}
 func (*VolumeNodeResources) Descriptor() ([]byte, []int) {
-	return fileDescriptor_3b530c1983504d8d, []int{12}
+	return fileDescriptor_3b530c1983504d8d, []int{15}
 }
 func (m *VolumeNodeResources) XXX_Unmarshal(b []byte) error {
 	return m.Unmarshal(b)
@@ -411,6 +495,9 @@ func (m *VolumeNodeResources) XXX_DiscardUnknown() {
 var xxx_messageInfo_VolumeNodeResources proto.InternalMessageInfo
 
 func init() {
+	proto.RegisterType((*CSIDriver)(nil), "k8s.io.api.storage.v1.CSIDriver")
+	proto.RegisterType((*CSIDriverList)(nil), "k8s.io.api.storage.v1.CSIDriverList")
+	proto.RegisterType((*CSIDriverSpec)(nil), "k8s.io.api.storage.v1.CSIDriverSpec")
 	proto.RegisterType((*CSINode)(nil), "k8s.io.api.storage.v1.CSINode")
 	proto.RegisterType((*CSINodeDriver)(nil), "k8s.io.api.storage.v1.CSINodeDriver")
 	proto.RegisterType((*CSINodeList)(nil), "k8s.io.api.storage.v1.CSINodeList")
@@ -433,83 +520,233 @@ func init() {
 }
 
 var fileDescriptor_3b530c1983504d8d = []byte{
-	// 1212 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0x41, 0x6f, 0xe3, 0x44,
-	0x14, 0xae, 0x9b, 0xa4, 0x4d, 0x27, 0x2d, 0x9b, 0xce, 0x16, 0x08, 0x39, 0x24, 0x95, 0x41, 0x10,
-	0x0a, 0xeb, 0x6c, 0x97, 0x65, 0xb5, 0x42, 0x02, 0x29, 0x6e, 0x23, 0x51, 0xd1, 0xb4, 0xd5, 0xb4,
-	0xac, 0x10, 0x02, 0xc4, 0xd4, 0x1e, 0x52, 0x6f, 0x62, 0x8f, 0xf1, 0x4c, 0x02, 0xb9, 0x71, 0xe2,
-	0x86, 0x04, 0x57, 0x7e, 0x05, 0x5c, 0x39, 0x72, 0x2a, 0xb7, 0x15, 0xa7, 0x3d, 0x45, 0xd4, 0x9c,
-	0xe1, 0x07, 0xf4, 0x84, 0x66, 0x3c, 0x8d, 0x9d, 0xc4, 0x29, 0xe9, 0xa5, 0xb7, 0xcc, 0x9b, 0xf7,
-	0x7d, 0xef, 0xbd, 0xf9, 0xde, 0xbc, 0x71, 0xc0, 0x07, 0x9d, 0xc7, 0xcc, 0x70, 0x68, 0xbd, 0xd3,
-	0x3b, 0x25, 0x81, 0x47, 0x38, 0x61, 0xf5, 0x3e, 0xf1, 0x6c, 0x1a, 0xd4, 0xd5, 0x06, 0xf6, 0x9d,
-	0x3a, 0xe3, 0x34, 0xc0, 0x6d, 0x52, 0xef, 0x6f, 0xd7, 0xdb, 0xc4, 0x23, 0x01, 0xe6, 0xc4, 0x36,
-	0xfc, 0x80, 0x72, 0x0a, 0x5f, 0x8c, 0xdc, 0x0c, 0xec, 0x3b, 0x86, 0x72, 0x33, 0xfa, 0xdb, 0xe5,
-	0x7b, 0x6d, 0x87, 0x9f, 0xf5, 0x4e, 0x0d, 0x8b, 0xba, 0xf5, 0x36, 0x6d, 0xd3, 0xba, 0xf4, 0x3e,
-	0xed, 0x7d, 0x25, 0x57, 0x72, 0x21, 0x7f, 0x45, 0x2c, 0x65, 0x3d, 0x11, 0xcc, 0xa2, 0x41, 0x5a,
-	0xa4, 0xf2, 0xc3, 0xd8, 0xc7, 0xc5, 0xd6, 0x99, 0xe3, 0x91, 0x60, 0x50, 0xf7, 0x3b, 0x6d, 0x61,
-	0x60, 0x75, 0x97, 0x70, 0x9c, 0x86, 0xaa, 0xcf, 0x42, 0x05, 0x3d, 0x8f, 0x3b, 0x2e, 0x99, 0x02,
-	0x3c, 0xfa, 0x3f, 0x00, 0xb3, 0xce, 0x88, 0x8b, 0x27, 0x71, 0xfa, 0xaf, 0x1a, 0x58, 0xde, 0x39,
-	0xde, 0x3b, 0xa0, 0x36, 0x81, 0x5f, 0x82, 0xbc, 0xc8, 0xc7, 0xc6, 0x1c, 0x97, 0xb4, 0x4d, 0xad,
-	0x56, 0x78, 0x70, 0xdf, 0x88, 0xcf, 0x69, 0x44, 0x6b, 0xf8, 0x9d, 0xb6, 0x30, 0x30, 0x43, 0x78,
-	0x1b, 0xfd, 0x6d, 0xe3, 0xf0, 0xf4, 0x29, 0xb1, 0x78, 0x8b, 0x70, 0x6c, 0xc2, 0xf3, 0x61, 0x75,
-	0x21, 0x1c, 0x56, 0x41, 0x6c, 0x43, 0x23, 0x56, 0xb8, 0x0b, 0xb2, 0xcc, 0x27, 0x56, 0x69, 0x51,
-	0xb2, 0xeb, 0x46, 0xaa, 0x0a, 0x86, 0xca, 0xe7, 0xd8, 0x27, 0x96, 0xb9, 0xaa, 0xf8, 0xb2, 0x62,
-	0x85, 0x24, 0x5a, 0xff, 0x57, 0x03, 0x6b, 0xca, 0x67, 0x37, 0x70, 0xfa, 0x24, 0x80, 0x9b, 0x20,
-	0xeb, 0x61, 0x97, 0xc8, 0xac, 0x57, 0x62, 0xcc, 0x01, 0x76, 0x09, 0x92, 0x3b, 0xf0, 0x75, 0xb0,
-	0xe4, 0x51, 0x9b, 0xec, 0xed, 0xca, 0xd8, 0x2b, 0xe6, 0x0b, 0xca, 0x67, 0xe9, 0x40, 0x5a, 0x91,
-	0xda, 0x85, 0x0f, 0xc1, 0x2a, 0xa7, 0x3e, 0xed, 0xd2, 0xf6, 0xe0, 0x23, 0x32, 0x60, 0xa5, 0xcc,
-	0x66, 0xa6, 0xb6, 0x62, 0x16, 0xc3, 0x61, 0x75, 0xf5, 0x24, 0x61, 0x47, 0x63, 0x5e, 0xf0, 0x73,
-	0x50, 0xc0, 0xdd, 0x2e, 0xb5, 0x30, 0xc7, 0xa7, 0x5d, 0x52, 0xca, 0xca, 0xf2, 0xb6, 0x66, 0x94,
-	0xf7, 0x84, 0x76, 0x7b, 0x2e, 0x11, 0x71, 0x11, 0x61, 0xb4, 0x17, 0x58, 0x84, 0x99, 0x77, 0xc2,
-	0x61, 0xb5, 0xd0, 0x88, 0x29, 0x50, 0x92, 0x4f, 0xff, 0x45, 0x03, 0x05, 0x55, 0xf0, 0xbe, 0xc3,
-	0x38, 0xfc, 0x6c, 0x4a, 0x28, 0x63, 0x3e, 0xa1, 0x04, 0x5a, 0xca, 0x54, 0x54, 0xe5, 0xe7, 0xaf,
-	0x2c, 0x09, 0x91, 0x76, 0x40, 0xce, 0xe1, 0xc4, 0x65, 0xa5, 0xc5, 0xcd, 0x4c, 0xad, 0xf0, 0xa0,
-	0x72, 0xbd, 0x4a, 0xe6, 0x9a, 0xa2, 0xca, 0xed, 0x09, 0x10, 0x8a, 0xb0, 0xfa, 0x17, 0xa3, 0x8c,
-	0x85, 0x70, 0xf0, 0x10, 0x2c, 0xdb, 0x52, 0x2a, 0x56, 0xd2, 0x24, 0xeb, 0x6b, 0xd7, 0xb3, 0x46,
-	0xba, 0x9a, 0x77, 0x14, 0xf7, 0x72, 0xb4, 0x66, 0xe8, 0x8a, 0x45, 0xff, 0x61, 0x09, 0xac, 0x1e,
-	0x47, 0xb0, 0x9d, 0x2e, 0x66, 0xec, 0x16, 0x9a, 0xf7, 0x5d, 0x50, 0xf0, 0x03, 0xda, 0x77, 0x98,
-	0x43, 0x3d, 0x12, 0xa8, 0x3e, 0xba, 0xab, 0x20, 0x85, 0xa3, 0x78, 0x0b, 0x25, 0xfd, 0x60, 0x1b,
-	0x00, 0x1f, 0x07, 0xd8, 0x25, 0x5c, 0x54, 0x9f, 0x91, 0xd5, 0xbf, 0x33, 0xa3, 0xfa, 0x64, 0x45,
-	0xc6, 0xd1, 0x08, 0xd5, 0xf4, 0x78, 0x30, 0x88, 0xb3, 0x8b, 0x37, 0x50, 0x82, 0x1a, 0x76, 0xc0,
-	0x5a, 0x40, 0xac, 0x2e, 0x76, 0xdc, 0x23, 0xda, 0x75, 0xac, 0x81, 0x6c, 0xc3, 0x15, 0xb3, 0x19,
-	0x0e, 0xab, 0x6b, 0x28, 0xb9, 0x71, 0x39, 0xac, 0xde, 0x9f, 0x9e, 0x5c, 0xc6, 0x11, 0x09, 0x98,
-	0xc3, 0x38, 0xf1, 0x78, 0xd4, 0xa1, 0x63, 0x18, 0x34, 0xce, 0x2d, 0xee, 0x89, 0x4b, 0x7b, 0x1e,
-	0x3f, 0xf4, 0xb9, 0x43, 0x3d, 0x56, 0xca, 0xc5, 0xf7, 0xa4, 0x95, 0xb0, 0xa3, 0x31, 0x2f, 0xb8,
-	0x0f, 0x36, 0x44, 0x5f, 0x7f, 0x13, 0x05, 0x68, 0x7e, 0xeb, 0x63, 0x4f, 0x9c, 0x52, 0x69, 0x69,
-	0x53, 0xab, 0xe5, 0xcd, 0x52, 0x38, 0xac, 0x6e, 0x34, 0x52, 0xf6, 0x51, 0x2a, 0x0a, 0x7e, 0x02,
-	0xd6, 0xfb, 0xd2, 0x64, 0x3a, 0x9e, 0xed, 0x78, 0xed, 0x16, 0xb5, 0x49, 0x69, 0x59, 0x16, 0xbd,
-	0x15, 0x0e, 0xab, 0xeb, 0x4f, 0x26, 0x37, 0x2f, 0xd3, 0x8c, 0x68, 0x9a, 0x04, 0x7e, 0x0d, 0xd6,
-	0x65, 0x44, 0x62, 0xab, 0x4b, 0xef, 0x10, 0x56, 0xca, 0x4b, 0xe9, 0x6a, 0x49, 0xe9, 0xc4, 0xd1,
-	0x09, 0xdd, 0xae, 0x46, 0xc3, 0x31, 0xe9, 0x12, 0x8b, 0xd3, 0xe0, 0x84, 0x04, 0xae, 0xf9, 0x8a,
-	0xd2, 0x6b, 0xbd, 0x31, 0x49, 0x85, 0xa6, 0xd9, 0xcb, 0xef, 0x83, 0x3b, 0x13, 0x82, 0xc3, 0x22,
-	0xc8, 0x74, 0xc8, 0x20, 0x1a, 0x6a, 0x48, 0xfc, 0x84, 0x1b, 0x20, 0xd7, 0xc7, 0xdd, 0x1e, 0x89,
-	0x9a, 0x0f, 0x45, 0x8b, 0xf7, 0x16, 0x1f, 0x6b, 0xfa, 0x6f, 0x1a, 0x28, 0x26, 0xbb, 0xe7, 0x16,
-	0xe6, 0xc4, 0x87, 0xe3, 0x73, 0xe2, 0xd5, 0x39, 0x7a, 0x7a, 0xc6, 0xb0, 0xf8, 0x79, 0x11, 0x14,
-	0x23, 0x5d, 0x1a, 0x9c, 0x63, 0xeb, 0xcc, 0x25, 0x1e, 0xbf, 0x85, 0x0b, 0xdd, 0x1a, 0x7b, 0x8d,
-	0xde, 0xba, 0x76, 0x5c, 0xc7, 0x89, 0xcd, 0x7a, 0x96, 0xe0, 0xc7, 0x60, 0x89, 0x71, 0xcc, 0x7b,
-	0xe2, 0x92, 0x0b, 0xc2, 0x7b, 0xf3, 0x12, 0x4a, 0x50, 0xfc, 0x22, 0x45, 0x6b, 0xa4, 0xc8, 0xf4,
-	0xdf, 0x35, 0xb0, 0x31, 0x09, 0xb9, 0x05, 0x75, 0xf7, 0xc7, 0xd5, 0x7d, 0x63, 0xce, 0x62, 0x66,
-	0x28, 0xfc, 0xa7, 0x06, 0x5e, 0x9a, 0xaa, 0x5b, 0xbe, 0x7d, 0x62, 0x26, 0xf8, 0x13, 0x93, 0xe7,
-	0x20, 0x7e, 0xcb, 0xe5, 0x4c, 0x38, 0x4a, 0xd9, 0x47, 0xa9, 0x28, 0xf8, 0x14, 0x14, 0x1d, 0xaf,
-	0xeb, 0x78, 0x24, 0xb2, 0x1d, 0xc7, 0xfa, 0xa6, 0x5e, 0xdc, 0x49, 0x66, 0x29, 0xee, 0x46, 0x38,
-	0xac, 0x16, 0xf7, 0x26, 0x58, 0xd0, 0x14, 0xaf, 0xfe, 0x47, 0x8a, 0x32, 0xf2, 0xb5, 0x7b, 0x1b,
-	0xe4, 0xb1, 0xb4, 0x90, 0x40, 0x95, 0x31, 0x3a, 0xe9, 0x86, 0xb2, 0xa3, 0x91, 0x87, 0xec, 0x1b,
-	0x79, 0x14, 0x2a, 0xd1, 0xb9, 0xfb, 0x46, 0x82, 0x12, 0x7d, 0x23, 0xd7, 0x48, 0x91, 0x89, 0x24,
-	0xc4, 0x37, 0x8d, 0x3c, 0xcb, 0xcc, 0x78, 0x12, 0x07, 0xca, 0x8e, 0x46, 0x1e, 0xfa, 0x3f, 0x99,
-	0x14, 0x81, 0x64, 0x03, 0x26, 0xaa, 0xb1, 0x65, 0x35, 0xf9, 0xa9, 0x6a, 0xec, 0x51, 0x35, 0x36,
-	0xfc, 0x49, 0x03, 0x10, 0x8f, 0x28, 0x5a, 0x57, 0x0d, 0x1a, 0x75, 0x51, 0xf3, 0x46, 0x57, 0xc2,
-	0x68, 0x4c, 0xf1, 0x44, 0x2f, 0x61, 0x59, 0xc5, 0x87, 0xd3, 0x0e, 0x28, 0x25, 0x38, 0xb4, 0x41,
-	0x21, 0xb2, 0x36, 0x83, 0x80, 0x06, 0xea, 0x7a, 0xea, 0xd7, 0xe6, 0x22, 0x3d, 0xcd, 0x8a, 0xfc,
-	0x2c, 0x8b, 0xa1, 0x97, 0xc3, 0x6a, 0x21, 0xb1, 0x8f, 0x92, 0xb4, 0x22, 0x8a, 0x4d, 0xe2, 0x28,
-	0xd9, 0x9b, 0x45, 0xd9, 0x25, 0xb3, 0xa3, 0x24, 0x68, 0xcb, 0x4d, 0xf0, 0xf2, 0x8c, 0x63, 0xb9,
-	0xd1, 0x7b, 0xf1, 0xbd, 0x06, 0x92, 0x31, 0xe0, 0x3e, 0xc8, 0x8a, 0xbf, 0x09, 0x6a, 0x90, 0x6c,
-	0xcd, 0x37, 0x48, 0x4e, 0x1c, 0x97, 0xc4, 0xa3, 0x50, 0xac, 0x90, 0x64, 0x81, 0x6f, 0x82, 0x65,
-	0x97, 0x30, 0x86, 0xdb, 0x2a, 0x72, 0xfc, 0x21, 0xd7, 0x8a, 0xcc, 0xe8, 0x6a, 0x5f, 0x7f, 0x04,
-	0xee, 0xa6, 0x7c, 0x10, 0xc3, 0x2a, 0xc8, 0x59, 0xe2, 0xcb, 0x41, 0x26, 0x94, 0x33, 0x57, 0xc4,
-	0x44, 0xd9, 0x11, 0x06, 0x14, 0xd9, 0xcd, 0xda, 0xf9, 0x45, 0x65, 0xe1, 0xd9, 0x45, 0x65, 0xe1,
-	0xf9, 0x45, 0x65, 0xe1, 0xbb, 0xb0, 0xa2, 0x9d, 0x87, 0x15, 0xed, 0x59, 0x58, 0xd1, 0x9e, 0x87,
-	0x15, 0xed, 0xaf, 0xb0, 0xa2, 0xfd, 0xf8, 0x77, 0x65, 0xe1, 0xd3, 0xc5, 0xfe, 0xf6, 0x7f, 0x01,
-	0x00, 0x00, 0xff, 0xff, 0x5c, 0x59, 0x23, 0xb9, 0x2c, 0x0e, 0x00, 0x00,
+	// 1336 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0x4f, 0x6f, 0x1b, 0x45,
+	0x14, 0xcf, 0xc6, 0xf9, 0x3b, 0x4e, 0x5a, 0x67, 0x1a, 0xc0, 0xe4, 0xe0, 0x8d, 0x96, 0x0a, 0x42,
+	0xa1, 0xeb, 0xa6, 0x94, 0xaa, 0xaa, 0x04, 0x52, 0x36, 0x31, 0x22, 0x22, 0x4e, 0xa2, 0x49, 0xa9,
+	0x10, 0x02, 0xc4, 0x64, 0xf7, 0xd5, 0xd9, 0xc6, 0xbb, 0xb3, 0xdd, 0x1d, 0x1b, 0x7c, 0xe3, 0xc4,
+	0x0d, 0x09, 0xae, 0x7c, 0x0a, 0x90, 0xe0, 0xc2, 0x91, 0x53, 0xb9, 0x55, 0x9c, 0x7a, 0xb2, 0xe8,
+	0x72, 0x05, 0x3e, 0x40, 0x4e, 0x68, 0x66, 0xc7, 0xde, 0xb5, 0xbd, 0x4e, 0xd3, 0x8b, 0x6f, 0x9e,
+	0xf7, 0xde, 0xef, 0xf7, 0xde, 0x9b, 0xf7, 0x67, 0xd6, 0xe8, 0xfd, 0xd3, 0x3b, 0x91, 0xe9, 0xb2,
+	0xea, 0x69, 0xeb, 0x18, 0x42, 0x1f, 0x38, 0x44, 0xd5, 0x36, 0xf8, 0x0e, 0x0b, 0xab, 0x4a, 0x41,
+	0x03, 0xb7, 0x1a, 0x71, 0x16, 0xd2, 0x06, 0x54, 0xdb, 0x9b, 0xd5, 0x06, 0xf8, 0x10, 0x52, 0x0e,
+	0x8e, 0x19, 0x84, 0x8c, 0x33, 0xfc, 0x52, 0x62, 0x66, 0xd2, 0xc0, 0x35, 0x95, 0x99, 0xd9, 0xde,
+	0x5c, 0xbb, 0xde, 0x70, 0xf9, 0x49, 0xeb, 0xd8, 0xb4, 0x99, 0x57, 0x6d, 0xb0, 0x06, 0xab, 0x4a,
+	0xeb, 0xe3, 0xd6, 0x03, 0x79, 0x92, 0x07, 0xf9, 0x2b, 0x61, 0x59, 0x33, 0x32, 0xce, 0x6c, 0x16,
+	0xe6, 0x79, 0x5a, 0xbb, 0x95, 0xda, 0x78, 0xd4, 0x3e, 0x71, 0x7d, 0x08, 0x3b, 0xd5, 0xe0, 0xb4,
+	0x21, 0x04, 0x51, 0xd5, 0x03, 0x4e, 0xf3, 0x50, 0xd5, 0x71, 0xa8, 0xb0, 0xe5, 0x73, 0xd7, 0x83,
+	0x11, 0xc0, 0xed, 0xe7, 0x01, 0x22, 0xfb, 0x04, 0x3c, 0x3a, 0x8c, 0x33, 0x7e, 0xd5, 0xd0, 0xe2,
+	0xf6, 0xd1, 0xee, 0x4e, 0xe8, 0xb6, 0x21, 0xc4, 0x5f, 0xa2, 0x05, 0x11, 0x91, 0x43, 0x39, 0x2d,
+	0x6b, 0xeb, 0xda, 0x46, 0xf1, 0xe6, 0x0d, 0x33, 0xbd, 0xa9, 0x3e, 0xb1, 0x19, 0x9c, 0x36, 0x84,
+	0x20, 0x32, 0x85, 0xb5, 0xd9, 0xde, 0x34, 0x0f, 0x8e, 0x1f, 0x82, 0xcd, 0xeb, 0xc0, 0xa9, 0x85,
+	0x1f, 0x77, 0xf5, 0xa9, 0xb8, 0xab, 0xa3, 0x54, 0x46, 0xfa, 0xac, 0xf8, 0x03, 0x34, 0x13, 0x05,
+	0x60, 0x97, 0xa7, 0x25, 0xfb, 0x55, 0x33, 0xb7, 0x0e, 0x66, 0x3f, 0xa2, 0xa3, 0x00, 0x6c, 0x6b,
+	0x49, 0x31, 0xce, 0x88, 0x13, 0x91, 0x78, 0xe3, 0x17, 0x0d, 0x2d, 0xf7, 0xad, 0xf6, 0xdc, 0x88,
+	0xe3, 0xcf, 0x46, 0x62, 0x37, 0x2f, 0x16, 0xbb, 0x40, 0xcb, 0xc8, 0x4b, 0xca, 0xcf, 0x42, 0x4f,
+	0x92, 0x89, 0xbb, 0x86, 0x66, 0x5d, 0x0e, 0x5e, 0x54, 0x9e, 0x5e, 0x2f, 0x6c, 0x14, 0x6f, 0xae,
+	0x3f, 0x2f, 0x70, 0x6b, 0x59, 0x91, 0xcd, 0xee, 0x0a, 0x18, 0x49, 0xd0, 0xc6, 0x3f, 0xd9, 0xb0,
+	0x45, 0x3a, 0xf8, 0x2e, 0xba, 0x44, 0x39, 0xa7, 0xf6, 0x09, 0x81, 0x47, 0x2d, 0x37, 0x04, 0x47,
+	0x06, 0xbf, 0x60, 0xe1, 0xb8, 0xab, 0x5f, 0xda, 0x1a, 0xd0, 0x90, 0x21, 0x4b, 0x81, 0x0d, 0x98,
+	0xb3, 0xeb, 0x3f, 0x60, 0x07, 0x7e, 0x9d, 0xb5, 0x7c, 0x2e, 0xaf, 0x55, 0x61, 0x0f, 0x07, 0x34,
+	0x64, 0xc8, 0x12, 0xdb, 0x68, 0xb5, 0xcd, 0x9a, 0x2d, 0x0f, 0xf6, 0xdc, 0x07, 0x60, 0x77, 0xec,
+	0x26, 0xd4, 0x99, 0x03, 0x51, 0xb9, 0xb0, 0x5e, 0xd8, 0x58, 0xb4, 0xaa, 0x71, 0x57, 0x5f, 0xbd,
+	0x9f, 0xa3, 0x3f, 0xeb, 0xea, 0x57, 0x72, 0xe4, 0x24, 0x97, 0xcc, 0xf8, 0x59, 0x43, 0xf3, 0xdb,
+	0x47, 0xbb, 0xfb, 0xcc, 0x81, 0x09, 0xf4, 0xd6, 0xce, 0x40, 0x6f, 0x19, 0xe3, 0x4b, 0x24, 0xe2,
+	0x19, 0xdb, 0x59, 0xff, 0x25, 0x25, 0x12, 0x36, 0x6a, 0x2a, 0xd6, 0xd1, 0x8c, 0x4f, 0x3d, 0x90,
+	0x51, 0x2f, 0xa6, 0x98, 0x7d, 0xea, 0x01, 0x91, 0x1a, 0xfc, 0x3a, 0x9a, 0xf3, 0x99, 0x03, 0xbb,
+	0x3b, 0xd2, 0xf7, 0xa2, 0x75, 0x49, 0xd9, 0xcc, 0xed, 0x4b, 0x29, 0x51, 0x5a, 0x7c, 0x0b, 0x2d,
+	0x71, 0x16, 0xb0, 0x26, 0x6b, 0x74, 0x3e, 0x82, 0x4e, 0xef, 0xb2, 0x4b, 0x71, 0x57, 0x5f, 0xba,
+	0x97, 0x91, 0x93, 0x01, 0x2b, 0xfc, 0x39, 0x2a, 0xd2, 0x66, 0x93, 0xd9, 0x94, 0xd3, 0xe3, 0x26,
+	0x94, 0x67, 0x64, 0x7a, 0xd7, 0xc6, 0xa4, 0x97, 0x14, 0x47, 0xf8, 0x25, 0x10, 0xb1, 0x56, 0x68,
+	0x43, 0x64, 0x5d, 0x8e, 0xbb, 0x7a, 0x71, 0x2b, 0xa5, 0x20, 0x59, 0x3e, 0xe3, 0x27, 0x0d, 0x15,
+	0x55, 0xc2, 0x13, 0x18, 0xa4, 0xed, 0xc1, 0x41, 0xaa, 0x9c, 0x5f, 0xa5, 0x31, 0x63, 0xf4, 0x45,
+	0x3f, 0x62, 0x39, 0x43, 0x07, 0x68, 0xde, 0x91, 0xa5, 0x8a, 0xca, 0x9a, 0x64, 0xbd, 0x7a, 0x3e,
+	0xab, 0x1a, 0xd1, 0xcb, 0x8a, 0x7b, 0x3e, 0x39, 0x47, 0xa4, 0xc7, 0x62, 0x7c, 0x37, 0x87, 0x96,
+	0x8e, 0x12, 0xd8, 0x76, 0x93, 0x46, 0xd1, 0x04, 0x9a, 0xf7, 0x5d, 0x54, 0x0c, 0x42, 0xd6, 0x76,
+	0x23, 0x97, 0xf9, 0x10, 0xaa, 0x3e, 0xba, 0xa2, 0x20, 0xc5, 0xc3, 0x54, 0x45, 0xb2, 0x76, 0xb8,
+	0x81, 0x50, 0x40, 0x43, 0xea, 0x01, 0x17, 0xd9, 0x17, 0x64, 0xf6, 0xef, 0x8c, 0xc9, 0x3e, 0x9b,
+	0x91, 0x79, 0xd8, 0x47, 0xd5, 0x7c, 0x1e, 0x76, 0xd2, 0xe8, 0x52, 0x05, 0xc9, 0x50, 0xe3, 0x53,
+	0xb4, 0x1c, 0x82, 0xdd, 0xa4, 0xae, 0x77, 0xc8, 0x9a, 0xae, 0xdd, 0x91, 0x6d, 0xb8, 0x68, 0xd5,
+	0xe2, 0xae, 0xbe, 0x4c, 0xb2, 0x8a, 0xb3, 0xae, 0x7e, 0x63, 0xf4, 0x5d, 0x34, 0x0f, 0x21, 0x8c,
+	0xdc, 0x88, 0x83, 0xcf, 0x93, 0x0e, 0x1d, 0xc0, 0x90, 0x41, 0x6e, 0x31, 0x27, 0x9e, 0xd8, 0x52,
+	0x07, 0x01, 0x77, 0x99, 0x1f, 0x95, 0x67, 0xd3, 0x39, 0xa9, 0x67, 0xe4, 0x64, 0xc0, 0x0a, 0xef,
+	0xa1, 0x55, 0xd1, 0xd7, 0x5f, 0x25, 0x0e, 0x6a, 0x5f, 0x07, 0xd4, 0x17, 0xb7, 0x54, 0x9e, 0x93,
+	0x4b, 0xb1, 0x2c, 0x56, 0xda, 0x56, 0x8e, 0x9e, 0xe4, 0xa2, 0xf0, 0x27, 0x68, 0x25, 0xd9, 0x69,
+	0x96, 0xeb, 0x3b, 0xae, 0xdf, 0x10, 0x1b, 0xad, 0x3c, 0x2f, 0x93, 0xbe, 0x16, 0x77, 0xf5, 0x95,
+	0xfb, 0xc3, 0xca, 0xb3, 0x3c, 0x21, 0x19, 0x25, 0xc1, 0x8f, 0xd0, 0x8a, 0xf4, 0x08, 0x8e, 0x1a,
+	0x7a, 0x17, 0xa2, 0xf2, 0x82, 0x2c, 0xdd, 0x46, 0xb6, 0x74, 0xe2, 0xea, 0x44, 0xdd, 0x7a, 0xab,
+	0xe1, 0x08, 0x9a, 0x60, 0x73, 0x16, 0xde, 0x83, 0xd0, 0xb3, 0x5e, 0x55, 0xf5, 0x5a, 0xd9, 0x1a,
+	0xa6, 0x22, 0xa3, 0xec, 0x6b, 0xef, 0xa1, 0xcb, 0x43, 0x05, 0xc7, 0x25, 0x54, 0x38, 0x85, 0x4e,
+	0xb2, 0xd4, 0x88, 0xf8, 0x89, 0x57, 0xd1, 0x6c, 0x9b, 0x36, 0x5b, 0x90, 0x34, 0x1f, 0x49, 0x0e,
+	0x77, 0xa7, 0xef, 0x68, 0xc6, 0x6f, 0x1a, 0x2a, 0x65, 0xbb, 0x67, 0x02, 0x7b, 0xe2, 0xc3, 0xc1,
+	0x3d, 0xf1, 0xda, 0x05, 0x7a, 0x7a, 0xcc, 0xb2, 0xf8, 0x71, 0x1a, 0x95, 0x92, 0xba, 0x24, 0xcf,
+	0xa9, 0x07, 0x3e, 0x9f, 0xc0, 0x40, 0xd7, 0x07, 0x5e, 0xa3, 0xb7, 0xce, 0x5d, 0xd7, 0x69, 0x60,
+	0xe3, 0x9e, 0x25, 0xfc, 0x31, 0x9a, 0x8b, 0x38, 0xe5, 0x2d, 0x31, 0xe4, 0x82, 0xf0, 0xfa, 0x45,
+	0x09, 0x25, 0x28, 0x7d, 0x91, 0x92, 0x33, 0x51, 0x64, 0xc6, 0xef, 0x1a, 0x5a, 0x1d, 0x86, 0x4c,
+	0xa0, 0xba, 0x7b, 0x83, 0xd5, 0x7d, 0xe3, 0x82, 0xc9, 0x8c, 0xa9, 0xf0, 0x9f, 0x1a, 0x7a, 0x79,
+	0x24, 0x6f, 0xf9, 0xf6, 0x89, 0x9d, 0x10, 0x0c, 0x6d, 0x9e, 0xfd, 0xf4, 0x2d, 0x97, 0x3b, 0xe1,
+	0x30, 0x47, 0x4f, 0x72, 0x51, 0xf8, 0x21, 0x2a, 0xb9, 0x7e, 0xd3, 0xf5, 0x21, 0x91, 0x1d, 0xa5,
+	0xf5, 0xcd, 0x1d, 0xdc, 0x61, 0x66, 0x59, 0xdc, 0xd5, 0xb8, 0xab, 0x97, 0x76, 0x87, 0x58, 0xc8,
+	0x08, 0xaf, 0xf1, 0x47, 0x4e, 0x65, 0xe4, 0x6b, 0xf7, 0x36, 0x5a, 0x48, 0xbe, 0x03, 0x21, 0x54,
+	0x69, 0xf4, 0x6f, 0x7a, 0x4b, 0xc9, 0x49, 0xdf, 0x42, 0xf6, 0x8d, 0xbc, 0x0a, 0x15, 0xe8, 0x85,
+	0xfb, 0x46, 0x82, 0x32, 0x7d, 0x23, 0xcf, 0x44, 0x91, 0x89, 0x20, 0xc4, 0x37, 0x8d, 0xbc, 0xcb,
+	0xc2, 0x60, 0x10, 0xfb, 0x4a, 0x4e, 0xfa, 0x16, 0xc6, 0xbf, 0x85, 0x9c, 0x02, 0xc9, 0x06, 0xcc,
+	0x64, 0xd3, 0xfb, 0xf2, 0x1d, 0xce, 0xc6, 0xe9, 0x67, 0xe3, 0xe0, 0x1f, 0x34, 0x84, 0x69, 0x9f,
+	0xa2, 0xde, 0x6b, 0xd0, 0xa4, 0x8b, 0x6a, 0x2f, 0x34, 0x12, 0xe6, 0xd6, 0x08, 0x4f, 0xf2, 0x12,
+	0xae, 0x29, 0xff, 0x78, 0xd4, 0x80, 0xe4, 0x38, 0xc7, 0x0e, 0x2a, 0x26, 0xd2, 0x5a, 0x18, 0xb2,
+	0x50, 0x8d, 0xa7, 0x71, 0x6e, 0x2c, 0xd2, 0xd2, 0xaa, 0xc8, 0xcf, 0xb2, 0x14, 0x7a, 0xd6, 0xd5,
+	0x8b, 0x19, 0x3d, 0xc9, 0xd2, 0x0a, 0x2f, 0x0e, 0xa4, 0x5e, 0x66, 0x5e, 0xcc, 0xcb, 0x0e, 0x8c,
+	0xf7, 0x92, 0xa1, 0x5d, 0xab, 0xa1, 0x57, 0xc6, 0x5c, 0xcb, 0x0b, 0xbd, 0x17, 0xdf, 0x6a, 0x28,
+	0xeb, 0x03, 0xef, 0xa1, 0x19, 0xf1, 0x27, 0x54, 0x2d, 0x92, 0x6b, 0x17, 0x5b, 0x24, 0xf7, 0x5c,
+	0x0f, 0xd2, 0x55, 0x28, 0x4e, 0x44, 0xb2, 0xe0, 0x37, 0xd1, 0xbc, 0x07, 0x51, 0x44, 0x1b, 0xca,
+	0x73, 0xfa, 0x21, 0x57, 0x4f, 0xc4, 0xa4, 0xa7, 0x37, 0x6e, 0xa3, 0x2b, 0x39, 0x1f, 0xc4, 0x58,
+	0x47, 0xb3, 0xb6, 0xfc, 0xbf, 0x24, 0x02, 0x9a, 0xb5, 0x16, 0xc5, 0x46, 0xd9, 0x96, 0x7f, 0x93,
+	0x12, 0xb9, 0xb5, 0xf1, 0xf8, 0x59, 0x65, 0xea, 0xc9, 0xb3, 0xca, 0xd4, 0xd3, 0x67, 0x95, 0xa9,
+	0x6f, 0xe2, 0x8a, 0xf6, 0x38, 0xae, 0x68, 0x4f, 0xe2, 0x8a, 0xf6, 0x34, 0xae, 0x68, 0x7f, 0xc5,
+	0x15, 0xed, 0xfb, 0xbf, 0x2b, 0x53, 0x9f, 0x4e, 0xb7, 0x37, 0xff, 0x0f, 0x00, 0x00, 0xff, 0xff,
+	0x9b, 0x74, 0xdf, 0x56, 0x8a, 0x10, 0x00, 0x00,
+}
+
+func (m *CSIDriver) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CSIDriver) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CSIDriver) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
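// Editor's sketch (illustrative usage, not part of the generated file): the
// MarshalToSizedBuffer functions above fill the buffer from the end toward the
// start; the single bytes 0x0a and 0x12 are protobuf keys computed as
// (fieldNumber << 3) | wireType for length-delimited fields 1 and 2.
// A simple round trip through the generated code looks like this:
func roundTripCSIDriver(in *CSIDriver) (*CSIDriver, error) {
	raw, err := in.Marshal() // allocates Size() bytes and fills them back to front
	if err != nil {
		return nil, err
	}
	out := &CSIDriver{}
	if err := out.Unmarshal(raw); err != nil { // Unmarshal is defined later in this file
		return nil, err
	}
	return out, nil
}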
+
+func (m *CSIDriverList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CSIDriverList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CSIDriverList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *CSIDriverSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CSIDriverSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CSIDriverSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.VolumeLifecycleModes) > 0 {
+		for iNdEx := len(m.VolumeLifecycleModes) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.VolumeLifecycleModes[iNdEx])
+			copy(dAtA[i:], m.VolumeLifecycleModes[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeLifecycleModes[iNdEx])))
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if m.PodInfoOnMount != nil {
+		i--
+		if *m.PodInfoOnMount {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.AttachRequired != nil {
+		i--
+		if *m.AttachRequired {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
 }
 
 func (m *CSINode) Marshal() (dAtA []byte, err error) {
@@ -1190,6 +1427,57 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
 	dAtA[offset] = uint8(v)
 	return base
 }
+func (m *CSIDriver) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Spec.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *CSIDriverList) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *CSIDriverSpec) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.AttachRequired != nil {
+		n += 2
+	}
+	if m.PodInfoOnMount != nil {
+		n += 2
+	}
+	if len(m.VolumeLifecycleModes) > 0 {
+		for _, s := range m.VolumeLifecycleModes {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
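// Editor's sketch (illustrative, not part of the generated file): each
// embedded field above contributes one tag byte, a varint length prefix, and
// the payload itself, hence "1 + l + sovGenerated(uint64(l))". The varint
// prefix needs one byte per started group of 7 bits:
func varintLenSketch(x uint64) int {
	n := 1
	for x >= 1<<7 {
		x >>= 7
		n++
	}
	return n // e.g. 1 for x < 128, 2 for x < 16384
}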
+
 func (m *CSINode) Size() (n int) {
 	if m == nil {
 		return 0
@@ -1440,6 +1728,45 @@ func sovGenerated(x uint64) (n int) {
 func sozGenerated(x uint64) (n int) {
 	return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
 }
+func (this *CSIDriver) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&CSIDriver{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CSIDriverSpec", "CSIDriverSpec", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *CSIDriverList) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]CSIDriver{"
+	for _, f := range this.Items {
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CSIDriver", "CSIDriver", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&CSIDriverList{`,
+		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+		`Items:` + repeatedStringForItems + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *CSIDriverSpec) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&CSIDriverSpec{`,
+		`AttachRequired:` + valueToStringGenerated(this.AttachRequired) + `,`,
+		`PodInfoOnMount:` + valueToStringGenerated(this.PodInfoOnMount) + `,`,
+		`VolumeLifecycleModes:` + fmt.Sprintf("%v", this.VolumeLifecycleModes) + `,`,
+		`}`,
+	}, "")
+	return s
+}
 func (this *CSINode) String() string {
 	if this == nil {
 		return "nil"
@@ -1646,6 +1973,372 @@ func valueToStringGenerated(v interface{}) string {
 	pv := reflect.Indirect(rv).Interface()
 	return fmt.Sprintf("*%v", pv)
 }
+func (m *CSIDriver) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CSIDriver: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CSIDriver: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *CSIDriverList) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CSIDriverList: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CSIDriverList: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Items = append(m.Items, CSIDriver{})
+			if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *CSIDriverSpec) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CSIDriverSpec: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CSIDriverSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AttachRequired", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.AttachRequired = &b
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PodInfoOnMount", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.PodInfoOnMount = &b
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field VolumeLifecycleModes", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.VolumeLifecycleModes = append(m.VolumeLifecycleModes, VolumeLifecycleMode(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
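// Editor's sketch (illustrative, not part of the generated file): every decode
// loop above begins by reading a varint key and splitting it into a field
// number and a wire type, as in this reduced helper.
func readTagSketch(dAtA []byte, iNdEx int) (fieldNum int32, wireType int, next int, err error) {
	var key uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, 0, ErrIntOverflowGenerated
		}
		if iNdEx >= len(dAtA) {
			return 0, 0, 0, io.ErrUnexpectedEOF
		}
		b := dAtA[iNdEx]
		iNdEx++
		key |= uint64(b&0x7F) << shift
		if b < 0x80 {
			break // high bit clear: last byte of the varint
		}
	}
	return int32(key >> 3), int(key & 0x7), iNdEx, nil
}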
 func (m *CSINode) Unmarshal(dAtA []byte) error {
 	l := len(dAtA)
 	iNdEx := 0
@@ -3685,6 +4378,7 @@ func (m *VolumeNodeResources) Unmarshal(dAtA []byte) error {
 func skipGenerated(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
+	depth := 0
 	for iNdEx < l {
 		var wire uint64
 		for shift := uint(0); ; shift += 7 {
@@ -3716,10 +4410,8 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			return iNdEx, nil
 		case 1:
 			iNdEx += 8
-			return iNdEx, nil
 		case 2:
 			var length int
 			for shift := uint(0); ; shift += 7 {
@@ -3740,55 +4432,30 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 				return 0, ErrInvalidLengthGenerated
 			}
 			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthGenerated
-			}
-			return iNdEx, nil
 		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowGenerated
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipGenerated(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthGenerated
-				}
-			}
-			return iNdEx, nil
+			depth++
 		case 4:
-			return iNdEx, nil
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
 		case 5:
 			iNdEx += 4
-			return iNdEx, nil
 		default:
 			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
 		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
 	}
-	panic("unreachable")
+	return 0, io.ErrUnexpectedEOF
 }
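// Editor's note (illustrative, not part of the generated file): the rewrite
// above replaces per-field early returns and recursive group skipping with a
// single depth counter. Start-group keys (wire type 3) increment depth,
// end-group keys (wire type 4) decrement it, and the function only returns
// once depth is back to zero, so arbitrarily nested groups are skipped
// iteratively without recursion, and truncated input now yields
// io.ErrUnexpectedEOF instead of panicking.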
 
 var (
-	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
 )
diff --git a/vendor/k8s.io/api/storage/v1/generated.proto b/vendor/k8s.io/api/storage/v1/generated.proto
index e5004c84..cb3c42c7 100644
--- a/vendor/k8s.io/api/storage/v1/generated.proto
+++ b/vendor/k8s.io/api/storage/v1/generated.proto
@@ -29,6 +29,97 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
 // Package-wide variables from generator "generated".
 option go_package = "v1";
 
+// CSIDriver captures information about a Container Storage Interface (CSI)
+// volume driver deployed on the cluster.
+// Kubernetes attach detach controller uses this object to determine whether attach is required.
+// Kubelet uses this object to determine whether pod information needs to be passed on mount.
+// CSIDriver objects are non-namespaced.
+message CSIDriver {
+  // Standard object metadata.
+  // metadata.Name indicates the name of the CSI driver that this object
+  // refers to; it MUST be the same name returned by the CSI GetPluginName()
+  // call for that driver.
+  // The driver name must be 63 characters or less, beginning and ending with
+  // an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and
+  // alphanumerics between.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Specification of the CSI Driver.
+  optional CSIDriverSpec spec = 2;
+}
+
+// CSIDriverList is a collection of CSIDriver objects.
+message CSIDriverList {
+  // Standard list metadata
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // items is the list of CSIDriver
+  repeated CSIDriver items = 2;
+}
+
+// CSIDriverSpec is the specification of a CSIDriver.
+message CSIDriverSpec {
+  // attachRequired indicates this CSI volume driver requires an attach
+  // operation (because it implements the CSI ControllerPublishVolume()
+  // method), and that the Kubernetes attach detach controller should call
+  // the attach volume interface which checks the volumeattachment status
+  // and waits until the volume is attached before proceeding to mounting.
+  // The CSI external-attacher coordinates with CSI volume driver and updates
+  // the volumeattachment status when the attach operation is complete.
+  // If the CSIDriverRegistry feature gate is enabled and the value is
+  // specified to false, the attach operation will be skipped.
+  // Otherwise the attach operation will be called.
+  // +optional
+  optional bool attachRequired = 1;
+
+  // If set to true, podInfoOnMount indicates this CSI volume driver
+  // requires additional pod information (like podName, podUID, etc.) during
+  // mount operations.
+  // If set to false, pod information will not be passed on mount.
+  // Default is false.
+  // The CSI driver specifies podInfoOnMount as part of driver deployment.
+  // If true, Kubelet will pass pod information as VolumeContext in the CSI
+  // NodePublishVolume() calls.
+  // The CSI driver is responsible for parsing and validating the information
+  // passed in as VolumeContext.
+  // The following VolumeContext will be passed if podInfoOnMount is set to true.
+  // This list might grow, but the prefix will be used.
+  // "csi.storage.k8s.io/pod.name": pod.Name
+  // "csi.storage.k8s.io/pod.namespace": pod.Namespace
+  // "csi.storage.k8s.io/pod.uid": string(pod.UID)
+  // "csi.storage.k8s.io/ephemeral": "true" iff the volume is an ephemeral inline volume
+  //                                 defined by a CSIVolumeSource, otherwise "false"
+  //
+  // "csi.storage.k8s.io/ephemeral" is a new feature in Kubernetes 1.16. It is only
+  // required for drivers which support both the "Persistent" and "Ephemeral" VolumeLifecycleMode.
+  // Other drivers can leave pod info disabled and/or ignore this field.
+  // As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when
+  // deployed on such a cluster and the deployment determines which mode that is, for example
+  // via a command line parameter of the driver.
+  // +optional
+  optional bool podInfoOnMount = 2;
+
+  // volumeLifecycleModes defines what kind of volumes this CSI volume driver supports.
+  // The default if the list is empty is "Persistent", which is the usage
+  // defined by the CSI specification and implemented in Kubernetes via the usual
+  // PV/PVC mechanism.
+  // The other mode is "Ephemeral". In this mode, volumes are defined inline
+  // inside the pod spec with CSIVolumeSource and their lifecycle is tied to
+  // the lifecycle of that pod. A driver has to be aware of this
+  // because it is only going to get a NodePublishVolume call for such a volume.
+  // For more information about implementing this mode, see
+  // https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html
+  // A driver can support one or more of these modes and
+  // more modes may be added in the future.
+  // This field is beta.
+  // +optional
+  // +listType=set
+  repeated string volumeLifecycleModes = 3;
+}
+
 // CSINode holds information about all CSI drivers installed on a node.
 // CSI drivers do not need to create the CSINode object directly. As long as
 // they use the node-driver-registrar sidecar container, the kubelet will
diff --git a/vendor/k8s.io/api/storage/v1/register.go b/vendor/k8s.io/api/storage/v1/register.go
index 67493fd0..1a2f83d1 100644
--- a/vendor/k8s.io/api/storage/v1/register.go
+++ b/vendor/k8s.io/api/storage/v1/register.go
@@ -52,6 +52,9 @@ func addKnownTypes(scheme *runtime.Scheme) error {
 
 		&CSINode{},
 		&CSINodeList{},
+
+		&CSIDriver{},
+		&CSIDriverList{},
 	)
 
 	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
diff --git a/vendor/k8s.io/api/storage/v1/types.go b/vendor/k8s.io/api/storage/v1/types.go
index 86cb78b6..556427bb 100644
--- a/vendor/k8s.io/api/storage/v1/types.go
+++ b/vendor/k8s.io/api/storage/v1/types.go
@@ -221,6 +221,132 @@ type VolumeError struct {
 // +genclient:nonNamespaced
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 
+// CSIDriver captures information about a Container Storage Interface (CSI)
+// volume driver deployed on the cluster.
+// Kubernetes attach detach controller uses this object to determine whether attach is required.
+// Kubelet uses this object to determine whether pod information needs to be passed on mount.
+// CSIDriver objects are non-namespaced.
+type CSIDriver struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// Standard object metadata.
+	// metadata.Name indicates the name of the CSI driver that this object
+	// refers to; it MUST be the same name returned by the CSI GetPluginName()
+	// call for that driver.
+	// The driver name must be 63 characters or less, beginning and ending with
+	// an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and
+	// alphanumerics between.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Specification of the CSI Driver.
+	Spec CSIDriverSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// CSIDriverList is a collection of CSIDriver objects.
+type CSIDriverList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// Standard list metadata
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// items is the list of CSIDriver
+	Items []CSIDriver `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// CSIDriverSpec is the specification of a CSIDriver.
+type CSIDriverSpec struct {
+	// attachRequired indicates this CSI volume driver requires an attach
+	// operation (because it implements the CSI ControllerPublishVolume()
+	// method), and that the Kubernetes attach detach controller should call
+	// the attach volume interface which checks the volumeattachment status
+	// and waits until the volume is attached before proceeding to mounting.
+	// The CSI external-attacher coordinates with CSI volume driver and updates
+	// the volumeattachment status when the attach operation is complete.
+	// If the CSIDriverRegistry feature gate is enabled and the value is
+	// specified to false, the attach operation will be skipped.
+	// Otherwise the attach operation will be called.
+	// +optional
+	AttachRequired *bool `json:"attachRequired,omitempty" protobuf:"varint,1,opt,name=attachRequired"`
+
+	// If set to true, podInfoOnMount indicates this CSI volume driver
+	// requires additional pod information (like podName, podUID, etc.) during
+	// mount operations.
+	// If set to false, pod information will not be passed on mount.
+	// Default is false.
+	// The CSI driver specifies podInfoOnMount as part of driver deployment.
+	// If true, Kubelet will pass pod information as VolumeContext in the CSI
+	// NodePublishVolume() calls.
+	// The CSI driver is responsible for parsing and validating the information
+	// passed in as VolumeContext.
+	// The following VolumeContext will be passed if podInfoOnMount is set to true.
+	// This list might grow, but the prefix will be used.
+	// "csi.storage.k8s.io/pod.name": pod.Name
+	// "csi.storage.k8s.io/pod.namespace": pod.Namespace
+	// "csi.storage.k8s.io/pod.uid": string(pod.UID)
+	// "csi.storage.k8s.io/ephemeral": "true" iff the volume is an ephemeral inline volume
+	//                                 defined by a CSIVolumeSource, otherwise "false"
+	//
+	// "csi.storage.k8s.io/ephemeral" is a new feature in Kubernetes 1.16. It is only
+	// required for drivers which support both the "Persistent" and "Ephemeral" VolumeLifecycleMode.
+	// Other drivers can leave pod info disabled and/or ignore this field.
+	// As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when
+	// deployed on such a cluster and the deployment determines which mode that is, for example
+	// via a command line parameter of the driver.
+	// +optional
+	PodInfoOnMount *bool `json:"podInfoOnMount,omitempty" protobuf:"bytes,2,opt,name=podInfoOnMount"`
+
+	// volumeLifecycleModes defines what kind of volumes this CSI volume driver supports.
+	// The default if the list is empty is "Persistent", which is the usage
+	// defined by the CSI specification and implemented in Kubernetes via the usual
+	// PV/PVC mechanism.
+	// The other mode is "Ephemeral". In this mode, volumes are defined inline
+	// inside the pod spec with CSIVolumeSource and their lifecycle is tied to
+	// the lifecycle of that pod. A driver has to be aware of this
+	// because it is only going to get a NodePublishVolume call for such a volume.
+	// For more information about implementing this mode, see
+	// https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html
+	// A driver can support one or more of these modes and
+	// more modes may be added in the future.
+	// This field is beta.
+	// +optional
+	// +listType=set
+	VolumeLifecycleModes []VolumeLifecycleMode `json:"volumeLifecycleModes,omitempty" protobuf:"bytes,3,opt,name=volumeLifecycleModes"`
+}
+
+// VolumeLifecycleMode is an enumeration of possible usage modes for a volume
+// provided by a CSI driver. More modes may be added in the future.
+type VolumeLifecycleMode string
+
+const (
+	// VolumeLifecyclePersistent explicitly confirms that the driver implements
+	// the full CSI spec. It is the default when CSIDriverSpec.VolumeLifecycleModes is not
+	// set. Such volumes are managed in Kubernetes via the persistent volume
+	// claim mechanism and have a lifecycle that is independent of the pods which
+	// use them.
+	VolumeLifecyclePersistent VolumeLifecycleMode = "Persistent"
+
+	// VolumeLifecycleEphemeral indicates that the driver can be used for
+	// ephemeral inline volumes. Such volumes are specified inside the pod
+	// spec with a CSIVolumeSource and, as far as Kubernetes is concerned, have
+	// a lifecycle that is tied to the lifecycle of the pod. For example, such
+	// a volume might contain data that gets created specifically for that pod,
+	// like secrets.
+	// But how the volume actually gets created and managed is entirely up to
+	// the driver. It might also use reference counting to share the same volume
+	// instance among different pods if the CSIVolumeSource of those pods is
+	// identical.
+	VolumeLifecycleEphemeral VolumeLifecycleMode = "Ephemeral"
+)
+
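// Editor's sketch (illustrative usage, not part of the vendored file): a
// typical CSIDriver object as a driver deployment would create it; the driver
// name and field values are hypothetical.
func exampleCSIDriver() *CSIDriver {
	attachRequired := true
	podInfoOnMount := false
	return &CSIDriver{
		ObjectMeta: metav1.ObjectMeta{Name: "hostpath.csi.example.com"},
		Spec: CSIDriverSpec{
			AttachRequired:       &attachRequired,
			PodInfoOnMount:       &podInfoOnMount,
			VolumeLifecycleModes: []VolumeLifecycleMode{VolumeLifecyclePersistent},
		},
	}
}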
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
 // CSINode holds information about all CSI drivers installed on a node.
 // CSI drivers do not need to create the CSINode object directly. As long as
 // they use the node-driver-registrar sidecar container, the kubelet will
diff --git a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go
index d6e3a162..0e524a28 100644
--- a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go
@@ -27,6 +27,37 @@ package v1
 // Those methods can be generated by using hack/update-generated-swagger-docs.sh
 
 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_CSIDriver = map[string]string{
+	"":         "CSIDriver captures information about a Container Storage Interface (CSI) volume driver deployed on the cluster. Kubernetes attach detach controller uses this object to determine whether attach is required. Kubelet uses this object to determine whether pod information needs to be passed on mount. CSIDriver objects are non-namespaced.",
+	"metadata": "Standard object metadata. metadata.Name indicates the name of the CSI driver that this object refers to; it MUST be the same name returned by the CSI GetPluginName() call for that driver. The driver name must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and alphanumerics between. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"spec":     "Specification of the CSI Driver.",
+}
+
+func (CSIDriver) SwaggerDoc() map[string]string {
+	return map_CSIDriver
+}
+
+var map_CSIDriverList = map[string]string{
+	"":         "CSIDriverList is a collection of CSIDriver objects.",
+	"metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"items":    "items is the list of CSIDriver",
+}
+
+func (CSIDriverList) SwaggerDoc() map[string]string {
+	return map_CSIDriverList
+}
+
+var map_CSIDriverSpec = map[string]string{
+	"":                     "CSIDriverSpec is the specification of a CSIDriver.",
+	"attachRequired":       "attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.",
+	"podInfoOnMount":       "If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" iff the volume is an ephemeral inline volume\n                                defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.",
+	"volumeLifecycleModes": "volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future. This field is beta.",
+}
+
+func (CSIDriverSpec) SwaggerDoc() map[string]string {
+	return map_CSIDriverSpec
+}
+
 var map_CSINode = map[string]string{
 	"":         "CSINode holds information about all CSI drivers installed on a node. CSI drivers do not need to create the CSINode object directly. As long as they use the node-driver-registrar sidecar container, the kubelet will automatically populate the CSINode object for the CSI driver as part of kubelet plugin registration. CSINode has the same name as a node. If the object is missing, it means either there are no CSI Drivers available on the node, or the Kubelet version is low enough that it doesn't create this object. CSINode has an OwnerReference that points to the corresponding node object.",
 	"metadata": "metadata.name must be the Kubernetes node name.",
diff --git a/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go
index 76255a0a..efaa40aa 100644
--- a/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go
@@ -25,6 +25,97 @@ import (
 	runtime "k8s.io/apimachinery/pkg/runtime"
 )
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CSIDriver) DeepCopyInto(out *CSIDriver) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIDriver.
+func (in *CSIDriver) DeepCopy() *CSIDriver {
+	if in == nil {
+		return nil
+	}
+	out := new(CSIDriver)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CSIDriver) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CSIDriverList) DeepCopyInto(out *CSIDriverList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]CSIDriver, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIDriverList.
+func (in *CSIDriverList) DeepCopy() *CSIDriverList {
+	if in == nil {
+		return nil
+	}
+	out := new(CSIDriverList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CSIDriverList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CSIDriverSpec) DeepCopyInto(out *CSIDriverSpec) {
+	*out = *in
+	if in.AttachRequired != nil {
+		in, out := &in.AttachRequired, &out.AttachRequired
+		*out = new(bool)
+		**out = **in
+	}
+	if in.PodInfoOnMount != nil {
+		in, out := &in.PodInfoOnMount, &out.PodInfoOnMount
+		*out = new(bool)
+		**out = **in
+	}
+	if in.VolumeLifecycleModes != nil {
+		in, out := &in.VolumeLifecycleModes, &out.VolumeLifecycleModes
+		*out = make([]VolumeLifecycleMode, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIDriverSpec.
+func (in *CSIDriverSpec) DeepCopy() *CSIDriverSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(CSIDriverSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
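// Editor's sketch (illustrative, not part of the generated file): DeepCopy
// duplicates the pointer and slice fields, so mutating the copy does not leak
// back into the original object.
func deepCopyIsIndependent() bool {
	attach := true
	orig := CSIDriverSpec{AttachRequired: &attach}
	cp := orig.DeepCopy()
	*cp.AttachRequired = false
	return *orig.AttachRequired // still true
}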
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *CSINode) DeepCopyInto(out *CSINode) {
 	*out = *in
diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go b/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go
index 42324352..1f9db7ae 100644
--- a/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go
+++ b/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go
@@ -43,7 +43,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 func (m *VolumeAttachment) Reset()      { *m = VolumeAttachment{} }
 func (*VolumeAttachment) ProtoMessage() {}
@@ -1730,6 +1730,7 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error {
 func skipGenerated(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
+	depth := 0
 	for iNdEx < l {
 		var wire uint64
 		for shift := uint(0); ; shift += 7 {
@@ -1761,10 +1762,8 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			return iNdEx, nil
 		case 1:
 			iNdEx += 8
-			return iNdEx, nil
 		case 2:
 			var length int
 			for shift := uint(0); ; shift += 7 {
@@ -1785,55 +1784,30 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 				return 0, ErrInvalidLengthGenerated
 			}
 			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthGenerated
-			}
-			return iNdEx, nil
 		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowGenerated
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipGenerated(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthGenerated
-				}
-			}
-			return iNdEx, nil
+			depth++
 		case 4:
-			return iNdEx, nil
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
 		case 5:
 			iNdEx += 4
-			return iNdEx, nil
 		default:
 			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
 		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
 	}
-	panic("unreachable")
+	return 0, io.ErrUnexpectedEOF
 }
 
 var (
-	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
 )
diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.pb.go b/vendor/k8s.io/api/storage/v1beta1/generated.pb.go
index cd35af34..af4ce59f 100644
--- a/vendor/k8s.io/api/storage/v1beta1/generated.pb.go
+++ b/vendor/k8s.io/api/storage/v1beta1/generated.pb.go
@@ -44,7 +44,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 func (m *CSIDriver) Reset()      { *m = CSIDriver{} }
 func (*CSIDriver) ProtoMessage() {}
@@ -4378,6 +4378,7 @@ func (m *VolumeNodeResources) Unmarshal(dAtA []byte) error {
 func skipGenerated(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
+	depth := 0
 	for iNdEx < l {
 		var wire uint64
 		for shift := uint(0); ; shift += 7 {
@@ -4409,10 +4410,8 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			return iNdEx, nil
 		case 1:
 			iNdEx += 8
-			return iNdEx, nil
 		case 2:
 			var length int
 			for shift := uint(0); ; shift += 7 {
@@ -4433,55 +4432,30 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 				return 0, ErrInvalidLengthGenerated
 			}
 			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthGenerated
-			}
-			return iNdEx, nil
 		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowGenerated
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipGenerated(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthGenerated
-				}
-			}
-			return iNdEx, nil
+			depth++
 		case 4:
-			return iNdEx, nil
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
 		case 5:
 			iNdEx += 4
-			return iNdEx, nil
 		default:
 			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
 		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
 	}
-	panic("unreachable")
+	return 0, io.ErrUnexpectedEOF
 }
 
 var (
-	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
 )
diff --git a/vendor/k8s.io/apimachinery/pkg/api/equality/semantic.go b/vendor/k8s.io/apimachinery/pkg/api/equality/semantic.go
new file mode 100644
index 00000000..f02fa8e4
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/equality/semantic.go
@@ -0,0 +1,49 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package equality
+
+import (
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/conversion"
+	"k8s.io/apimachinery/pkg/fields"
+	"k8s.io/apimachinery/pkg/labels"
+)
+
+// Semantic can do semantic deep equality checks for api objects.
+// Example: apiequality.Semantic.DeepEqual(aPod, aPodWithNonNilButEmptyMaps) == true
+var Semantic = conversion.EqualitiesOrDie(
+	func(a, b resource.Quantity) bool {
+		// Ignore formatting, only care that numeric value stayed the same.
+		// TODO: if we decide it's important, it should be safe to start comparing the format.
+		//
+		// Uninitialized quantities are equivalent to 0 quantities.
+		return a.Cmp(b) == 0
+	},
+	func(a, b metav1.MicroTime) bool {
+		return a.UTC() == b.UTC()
+	},
+	func(a, b metav1.Time) bool {
+		return a.UTC() == b.UTC()
+	},
+	func(a, b labels.Selector) bool {
+		return a.String() == b.String()
+	},
+	func(a, b fields.Selector) bool {
+		return a.String() == b.String()
+	},
+)
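// Editor's sketch (illustrative usage, not part of the vendored file):
// Semantic treats quantities with the same numeric value as equal even when
// their string formats differ.
func quantitiesSemanticallyEqual() bool {
	a := resource.MustParse("1")
	b := resource.MustParse("1000m")
	return Semantic.DeepEqual(a, b) // true, although a.String() != b.String()
}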
diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go
index 9fca2e16..2e09f4fa 100644
--- a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go
+++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go
@@ -36,7 +36,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 func (m *Quantity) Reset()      { *m = Quantity{} }
 func (*Quantity) ProtoMessage() {}
diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/math.go b/vendor/k8s.io/apimachinery/pkg/api/resource/math.go
index 7f63175d..8ffcb9f0 100644
--- a/vendor/k8s.io/apimachinery/pkg/api/resource/math.go
+++ b/vendor/k8s.io/apimachinery/pkg/api/resource/math.go
@@ -37,12 +37,8 @@ var (
 	big1024     = big.NewInt(1024)
 
 	// Commonly needed inf.Dec values-- treat as read only!
-	decZero      = inf.NewDec(0, 0)
-	decOne       = inf.NewDec(1, 0)
-	decMinusOne  = inf.NewDec(-1, 0)
-	decThousand  = inf.NewDec(1000, 0)
-	dec1024      = inf.NewDec(1024, 0)
-	decMinus1024 = inf.NewDec(-1024, 0)
+	decZero = inf.NewDec(0, 0)
+	decOne  = inf.NewDec(1, 0)
 
 	// Largest (in magnitude) number allowed.
 	maxAllowed = infDecAmount{inf.NewDec((1<<63)-1, 0)} // == max int64
diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go
index 516d041d..d95e03aa 100644
--- a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go
+++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go
@@ -634,6 +634,11 @@ func (q Quantity) MarshalJSON() ([]byte, error) {
 	return result, nil
 }
 
+// ToUnstructured implements the value.UnstructuredConverter interface.
+func (q Quantity) ToUnstructured() interface{} {
+	return q.String()
+}
+
 // UnmarshalJSON implements the json.Unmarshaller interface.
 // TODO: Remove support for leading/trailing whitespace
 func (q *Quantity) UnmarshalJSON(value []byte) error {
diff --git a/vendor/k8s.io/apimachinery/pkg/api/validation/doc.go b/vendor/k8s.io/apimachinery/pkg/api/validation/doc.go
new file mode 100644
index 00000000..9f20152e
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/validation/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package validation contains generic api type validation functions.
+package validation // import "k8s.io/apimachinery/pkg/api/validation"
diff --git a/vendor/k8s.io/apimachinery/pkg/api/validation/generic.go b/vendor/k8s.io/apimachinery/pkg/api/validation/generic.go
new file mode 100644
index 00000000..348cdc08
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/validation/generic.go
@@ -0,0 +1,85 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+	"strings"
+
+	"k8s.io/apimachinery/pkg/util/validation"
+	"k8s.io/apimachinery/pkg/util/validation/field"
+)
+
+const IsNegativeErrorMsg string = `must be greater than or equal to 0`
+
+// ValidateNameFunc validates that the provided name is valid for a given resource type.
+// Not all resources have the same validation rules for names. Prefix is true
+// if the name will have a value appended to it.  If the name is not valid,
+// this returns a list of descriptions of individual characteristics of the
+// value that were not valid.  Otherwise this returns an empty list or nil.
+type ValidateNameFunc func(name string, prefix bool) []string
+
+// NameIsDNSSubdomain is a ValidateNameFunc for names that must be a DNS subdomain.
+func NameIsDNSSubdomain(name string, prefix bool) []string {
+	if prefix {
+		name = maskTrailingDash(name)
+	}
+	return validation.IsDNS1123Subdomain(name)
+}
+
+// NameIsDNSLabel is a ValidateNameFunc for names that must be a DNS 1123 label.
+func NameIsDNSLabel(name string, prefix bool) []string {
+	if prefix {
+		name = maskTrailingDash(name)
+	}
+	return validation.IsDNS1123Label(name)
+}
+
+// NameIsDNS1035Label is a ValidateNameFunc for names that must be a DNS 1035 label.
+func NameIsDNS1035Label(name string, prefix bool) []string {
+	if prefix {
+		name = maskTrailingDash(name)
+	}
+	return validation.IsDNS1035Label(name)
+}
+
+// ValidateNamespaceName can be used to check whether the given namespace name is valid.
+// Prefix indicates this name will be used as part of generation, in which case
+// trailing dashes are allowed.
+var ValidateNamespaceName = NameIsDNSLabel
+
+// ValidateServiceAccountName can be used to check whether the given service account name is valid.
+// Prefix indicates this name will be used as part of generation, in which case
+// trailing dashes are allowed.
+var ValidateServiceAccountName = NameIsDNSSubdomain
+
+// maskTrailingDash replaces the final character of a string with a subdomain safe
+// value if it is a dash.
+func maskTrailingDash(name string) string {
+	if strings.HasSuffix(name, "-") {
+		return name[:len(name)-2] + "a"
+	}
+	return name
+}
+
+// ValidateNonnegativeField validates that the given value is not negative.
+func ValidateNonnegativeField(value int64, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if value < 0 {
+		allErrs = append(allErrs, field.Invalid(fldPath, value, IsNegativeErrorMsg))
+	}
+	return allErrs
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go b/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go
new file mode 100644
index 00000000..90f566b1
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go
@@ -0,0 +1,263 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+	"fmt"
+	"strings"
+
+	apiequality "k8s.io/apimachinery/pkg/api/equality"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	v1validation "k8s.io/apimachinery/pkg/apis/meta/v1/validation"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/apimachinery/pkg/util/validation"
+	"k8s.io/apimachinery/pkg/util/validation/field"
+)
+
+const FieldImmutableErrorMsg string = `field is immutable`
+
+const totalAnnotationSizeLimitB int = 256 * (1 << 10) // 256 kB
+
+// BannedOwners is a blacklist of objects that are not allowed to be owners.
+var BannedOwners = map[schema.GroupVersionKind]struct{}{
+	{Group: "", Version: "v1", Kind: "Event"}: {},
+}
+
+// ValidateClusterName can be used to check whether the given cluster name is valid.
+var ValidateClusterName = NameIsDNS1035Label
+
+// ValidateAnnotations validates that a set of annotations are correctly defined.
+func ValidateAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	var totalSize int64
+	for k, v := range annotations {
+		for _, msg := range validation.IsQualifiedName(strings.ToLower(k)) {
+			allErrs = append(allErrs, field.Invalid(fldPath, k, msg))
+		}
+		totalSize += (int64)(len(k)) + (int64)(len(v))
+	}
+	if totalSize > (int64)(totalAnnotationSizeLimitB) {
+		allErrs = append(allErrs, field.TooLong(fldPath, "", totalAnnotationSizeLimitB))
+	}
+	return allErrs
+}
+
+func validateOwnerReference(ownerReference metav1.OwnerReference, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	gvk := schema.FromAPIVersionAndKind(ownerReference.APIVersion, ownerReference.Kind)
+	// gvk.Group is empty for the legacy group.
+	if len(gvk.Version) == 0 {
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("apiVersion"), ownerReference.APIVersion, "version must not be empty"))
+	}
+	if len(gvk.Kind) == 0 {
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("kind"), ownerReference.Kind, "kind must not be empty"))
+	}
+	if len(ownerReference.Name) == 0 {
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), ownerReference.Name, "name must not be empty"))
+	}
+	if len(ownerReference.UID) == 0 {
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("uid"), ownerReference.UID, "uid must not be empty"))
+	}
+	if _, ok := BannedOwners[gvk]; ok {
+		allErrs = append(allErrs, field.Invalid(fldPath, ownerReference, fmt.Sprintf("%s is disallowed from being an owner", gvk)))
+	}
+	return allErrs
+}
+
+func ValidateOwnerReferences(ownerReferences []metav1.OwnerReference, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	controllerName := ""
+	for _, ref := range ownerReferences {
+		allErrs = append(allErrs, validateOwnerReference(ref, fldPath)...)
+		if ref.Controller != nil && *ref.Controller {
+			if controllerName != "" {
+				allErrs = append(allErrs, field.Invalid(fldPath, ownerReferences,
+					fmt.Sprintf("Only one reference can have Controller set to true. Found \"true\" in references for %v and %v", controllerName, ref.Name)))
+			} else {
+				controllerName = ref.Name
+			}
+		}
+	}
+	return allErrs
+}
+
+// ValidateFinalizerName validates finalizer names.
+func ValidateFinalizerName(stringValue string, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	for _, msg := range validation.IsQualifiedName(stringValue) {
+		allErrs = append(allErrs, field.Invalid(fldPath, stringValue, msg))
+	}
+
+	return allErrs
+}
+
+func ValidateNoNewFinalizers(newFinalizers []string, oldFinalizers []string, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	extra := sets.NewString(newFinalizers...).Difference(sets.NewString(oldFinalizers...))
+	if len(extra) != 0 {
+		allErrs = append(allErrs, field.Forbidden(fldPath, fmt.Sprintf("no new finalizers can be added if the object is being deleted, found new finalizers %#v", extra.List())))
+	}
+	return allErrs
+}
+
+func ValidateImmutableField(newVal, oldVal interface{}, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if !apiequality.Semantic.DeepEqual(oldVal, newVal) {
+		allErrs = append(allErrs, field.Invalid(fldPath, newVal, FieldImmutableErrorMsg))
+	}
+	return allErrs
+}
+
+// ValidateObjectMeta validates an object's metadata on creation. It expects that name generation has already
+// been performed.
+// It doesn't return an error for rootscoped resources with namespace, because namespace should already be cleared before.
+func ValidateObjectMeta(objMeta *metav1.ObjectMeta, requiresNamespace bool, nameFn ValidateNameFunc, fldPath *field.Path) field.ErrorList {
+	metadata, err := meta.Accessor(objMeta)
+	if err != nil {
+		allErrs := field.ErrorList{}
+		allErrs = append(allErrs, field.Invalid(fldPath, objMeta, err.Error()))
+		return allErrs
+	}
+	return ValidateObjectMetaAccessor(metadata, requiresNamespace, nameFn, fldPath)
+}
+
+// ValidateObjectMetaAccessor validates an object's metadata on creation. It expects that name generation has already
+// been performed.
+// It doesn't return an error for rootscoped resources with namespace, because namespace should already be cleared before.
+func ValidateObjectMetaAccessor(meta metav1.Object, requiresNamespace bool, nameFn ValidateNameFunc, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+
+	if len(meta.GetGenerateName()) != 0 {
+		for _, msg := range nameFn(meta.GetGenerateName(), true) {
+			allErrs = append(allErrs, field.Invalid(fldPath.Child("generateName"), meta.GetGenerateName(), msg))
+		}
+	}
+	// If the generated name validates, but the calculated value does not, it's a problem with generation, and we
+	// report it here. This may confuse users, but indicates a programming bug and still must be validated.
+	// If there are multiple fields out of which one is required then add an or as a separator
+	if len(meta.GetName()) == 0 {
+		allErrs = append(allErrs, field.Required(fldPath.Child("name"), "name or generateName is required"))
+	} else {
+		for _, msg := range nameFn(meta.GetName(), false) {
+			allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), meta.GetName(), msg))
+		}
+	}
+	if requiresNamespace {
+		if len(meta.GetNamespace()) == 0 {
+			allErrs = append(allErrs, field.Required(fldPath.Child("namespace"), ""))
+		} else {
+			for _, msg := range ValidateNamespaceName(meta.GetNamespace(), false) {
+				allErrs = append(allErrs, field.Invalid(fldPath.Child("namespace"), meta.GetNamespace(), msg))
+			}
+		}
+	} else {
+		if len(meta.GetNamespace()) != 0 {
+			allErrs = append(allErrs, field.Forbidden(fldPath.Child("namespace"), "not allowed on this type"))
+		}
+	}
+	if len(meta.GetClusterName()) != 0 {
+		for _, msg := range ValidateClusterName(meta.GetClusterName(), false) {
+			allErrs = append(allErrs, field.Invalid(fldPath.Child("clusterName"), meta.GetClusterName(), msg))
+		}
+	}
+	for _, entry := range meta.GetManagedFields() {
+		allErrs = append(allErrs, v1validation.ValidateFieldManager(entry.Manager, fldPath.Child("fieldManager"))...)
+	}
+	allErrs = append(allErrs, ValidateNonnegativeField(meta.GetGeneration(), fldPath.Child("generation"))...)
+	allErrs = append(allErrs, v1validation.ValidateLabels(meta.GetLabels(), fldPath.Child("labels"))...)
+	allErrs = append(allErrs, ValidateAnnotations(meta.GetAnnotations(), fldPath.Child("annotations"))...)
+	allErrs = append(allErrs, ValidateOwnerReferences(meta.GetOwnerReferences(), fldPath.Child("ownerReferences"))...)
+	allErrs = append(allErrs, ValidateFinalizers(meta.GetFinalizers(), fldPath.Child("finalizers"))...)
+	allErrs = append(allErrs, v1validation.ValidateManagedFields(meta.GetManagedFields(), fldPath.Child("managedFields"))...)
+	return allErrs
+}
+
+// ValidateFinalizers tests if the finalizer names are valid, and if there are conflicting finalizers.
+func ValidateFinalizers(finalizers []string, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	hasFinalizerOrphanDependents := false
+	hasFinalizerDeleteDependents := false
+	for _, finalizer := range finalizers {
+		allErrs = append(allErrs, ValidateFinalizerName(finalizer, fldPath)...)
+		if finalizer == metav1.FinalizerOrphanDependents {
+			hasFinalizerOrphanDependents = true
+		}
+		if finalizer == metav1.FinalizerDeleteDependents {
+			hasFinalizerDeleteDependents = true
+		}
+	}
+	if hasFinalizerDeleteDependents && hasFinalizerOrphanDependents {
+		allErrs = append(allErrs, field.Invalid(fldPath, finalizers, fmt.Sprintf("finalizer %s and %s cannot be both set", metav1.FinalizerOrphanDependents, metav1.FinalizerDeleteDependents)))
+	}
+	return allErrs
+}
+
+// ValidateObjectMetaUpdate validates an object's metadata when updated
+func ValidateObjectMetaUpdate(newMeta, oldMeta *metav1.ObjectMeta, fldPath *field.Path) field.ErrorList {
+	newMetadata, err := meta.Accessor(newMeta)
+	if err != nil {
+		allErrs := field.ErrorList{}
+		allErrs = append(allErrs, field.Invalid(fldPath, newMeta, err.Error()))
+		return allErrs
+	}
+	oldMetadata, err := meta.Accessor(oldMeta)
+	if err != nil {
+		allErrs := field.ErrorList{}
+		allErrs = append(allErrs, field.Invalid(fldPath, oldMeta, err.Error()))
+		return allErrs
+	}
+	return ValidateObjectMetaAccessorUpdate(newMetadata, oldMetadata, fldPath)
+}
+
+func ValidateObjectMetaAccessorUpdate(newMeta, oldMeta metav1.Object, fldPath *field.Path) field.ErrorList {
+	var allErrs field.ErrorList
+
+	// Finalizers cannot be added if the object is already being deleted.
+	if oldMeta.GetDeletionTimestamp() != nil {
+		allErrs = append(allErrs, ValidateNoNewFinalizers(newMeta.GetFinalizers(), oldMeta.GetFinalizers(), fldPath.Child("finalizers"))...)
+	}
+
+	// Reject updates that don't specify a resource version
+	if len(newMeta.GetResourceVersion()) == 0 {
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("resourceVersion"), newMeta.GetResourceVersion(), "must be specified for an update"))
+	}
+
+	// Generation shouldn't be decremented
+	if newMeta.GetGeneration() < oldMeta.GetGeneration() {
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("generation"), newMeta.GetGeneration(), "must not be decremented"))
+	}
+
+	for _, entry := range newMeta.GetManagedFields() {
+		allErrs = append(allErrs, v1validation.ValidateFieldManager(entry.Manager, fldPath.Child("fieldManager"))...)
+	}
+	allErrs = append(allErrs, ValidateImmutableField(newMeta.GetName(), oldMeta.GetName(), fldPath.Child("name"))...)
+	allErrs = append(allErrs, ValidateImmutableField(newMeta.GetNamespace(), oldMeta.GetNamespace(), fldPath.Child("namespace"))...)
+	allErrs = append(allErrs, ValidateImmutableField(newMeta.GetUID(), oldMeta.GetUID(), fldPath.Child("uid"))...)
+	allErrs = append(allErrs, ValidateImmutableField(newMeta.GetCreationTimestamp(), oldMeta.GetCreationTimestamp(), fldPath.Child("creationTimestamp"))...)
+	allErrs = append(allErrs, ValidateImmutableField(newMeta.GetDeletionTimestamp(), oldMeta.GetDeletionTimestamp(), fldPath.Child("deletionTimestamp"))...)
+	allErrs = append(allErrs, ValidateImmutableField(newMeta.GetDeletionGracePeriodSeconds(), oldMeta.GetDeletionGracePeriodSeconds(), fldPath.Child("deletionGracePeriodSeconds"))...)
+	allErrs = append(allErrs, ValidateImmutableField(newMeta.GetClusterName(), oldMeta.GetClusterName(), fldPath.Child("clusterName"))...)
+
+	allErrs = append(allErrs, v1validation.ValidateLabels(newMeta.GetLabels(), fldPath.Child("labels"))...)
+	allErrs = append(allErrs, ValidateAnnotations(newMeta.GetAnnotations(), fldPath.Child("annotations"))...)
+	allErrs = append(allErrs, ValidateOwnerReferences(newMeta.GetOwnerReferences(), fldPath.Child("ownerReferences"))...)
+	allErrs = append(allErrs, v1validation.ValidateManagedFields(newMeta.GetManagedFields(), fldPath.Child("managedFields"))...)
+
+	return allErrs
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go
index b56140de..ceb64527 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go
@@ -47,19 +47,6 @@ func addToGroupVersion(scheme *runtime.Scheme) error {
 	if err := scheme.AddIgnoredConversionType(&metav1.TypeMeta{}, &metav1.TypeMeta{}); err != nil {
 		return err
 	}
-	err := scheme.AddConversionFuncs(
-		metav1.Convert_string_To_labels_Selector,
-		metav1.Convert_labels_Selector_To_string,
-
-		metav1.Convert_string_To_fields_Selector,
-		metav1.Convert_fields_Selector_To_string,
-
-		metav1.Convert_Map_string_To_string_To_v1_LabelSelector,
-		metav1.Convert_v1_LabelSelector_To_Map_string_To_string,
-	)
-	if err != nil {
-		return err
-	}
 	// ListOptions is the only options struct which needs conversion (it exposes labels and fields
 	// as selectors for convenience). The other types have only a single representation today.
 	scheme.AddKnownTypes(SchemeGroupVersion,
@@ -71,8 +58,8 @@ func addToGroupVersion(scheme *runtime.Scheme) error {
 		&metav1.UpdateOptions{},
 	)
 	scheme.AddKnownTypes(SchemeGroupVersion,
-		&metav1beta1.Table{},
-		&metav1beta1.TableOptions{},
+		&metav1.Table{},
+		&metav1.TableOptions{},
 		&metav1beta1.PartialObjectMetadata{},
 		&metav1beta1.PartialObjectMetadataList{},
 	)
@@ -87,6 +74,7 @@ func addToGroupVersion(scheme *runtime.Scheme) error {
 		&metav1.DeleteOptions{},
 		&metav1.CreateOptions{},
 		&metav1.UpdateOptions{})
+
 	metav1.AddToGroupVersion(scheme, metav1.SchemeGroupVersion)
 	return nil
 }
@@ -95,5 +83,4 @@ func addToGroupVersion(scheme *runtime.Scheme) error {
 // the logic for conversion private.
 func init() {
 	localSchemeBuilder.Register(addToGroupVersion)
-	localSchemeBuilder.Register(metav1.RegisterConversions)
 }
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go
index 285a41a4..b937398c 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go
@@ -26,69 +26,10 @@ import (
 	"k8s.io/apimachinery/pkg/conversion"
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/labels"
-	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/intstr"
 )
 
-func AddConversionFuncs(scheme *runtime.Scheme) error {
-	return scheme.AddConversionFuncs(
-		Convert_v1_TypeMeta_To_v1_TypeMeta,
-
-		Convert_v1_ListMeta_To_v1_ListMeta,
-
-		Convert_v1_DeleteOptions_To_v1_DeleteOptions,
-
-		Convert_intstr_IntOrString_To_intstr_IntOrString,
-		Convert_Pointer_intstr_IntOrString_To_intstr_IntOrString,
-		Convert_intstr_IntOrString_To_Pointer_intstr_IntOrString,
-
-		Convert_Pointer_v1_Duration_To_v1_Duration,
-		Convert_v1_Duration_To_Pointer_v1_Duration,
-
-		Convert_Slice_string_To_v1_Time,
-		Convert_Slice_string_To_Pointer_v1_Time,
-
-		Convert_v1_Time_To_v1_Time,
-		Convert_v1_MicroTime_To_v1_MicroTime,
-
-		Convert_resource_Quantity_To_resource_Quantity,
-
-		Convert_string_To_labels_Selector,
-		Convert_labels_Selector_To_string,
-
-		Convert_string_To_fields_Selector,
-		Convert_fields_Selector_To_string,
-
-		Convert_Pointer_bool_To_bool,
-		Convert_bool_To_Pointer_bool,
-
-		Convert_Pointer_string_To_string,
-		Convert_string_To_Pointer_string,
-
-		Convert_Pointer_int64_To_int,
-		Convert_int_To_Pointer_int64,
-
-		Convert_Pointer_int32_To_int32,
-		Convert_int32_To_Pointer_int32,
-
-		Convert_Pointer_int64_To_int64,
-		Convert_int64_To_Pointer_int64,
-
-		Convert_Pointer_float64_To_float64,
-		Convert_float64_To_Pointer_float64,
-
-		Convert_Map_string_To_string_To_v1_LabelSelector,
-		Convert_v1_LabelSelector_To_Map_string_To_string,
-
-		Convert_Slice_string_To_Slice_int32,
-
-		Convert_Slice_string_To_Pointer_v1_DeletionPropagation,
-
-		Convert_Slice_string_To_v1_IncludeObjectPolicy,
-	)
-}
-
 func Convert_Pointer_float64_To_float64(in **float64, out *float64, s conversion.Scope) error {
 	if *in == nil {
 		*out = 0
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go
index babe8a8b..a22b0787 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go
@@ -49,6 +49,11 @@ func (d Duration) MarshalJSON() ([]byte, error) {
 	return json.Marshal(d.Duration.String())
 }
 
+// ToUnstructured implements the value.UnstructuredConverter interface.
+func (d Duration) ToUnstructured() interface{} {
+	return d.Duration.String()
+}
+
 // OpenAPISchemaType is used by the kube-openapi generator when constructing
 // the OpenAPI spec of this type.
 //
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go
index 31b1d955..3288c564 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go
@@ -47,7 +47,7 @@ var _ = time.Kitchen
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 func (m *APIGroup) Reset()      { *m = APIGroup{} }
 func (*APIGroup) ProtoMessage() {}
@@ -11004,6 +11004,7 @@ func (m *WatchEvent) Unmarshal(dAtA []byte) error {
 func skipGenerated(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
+	depth := 0
 	for iNdEx < l {
 		var wire uint64
 		for shift := uint(0); ; shift += 7 {
@@ -11035,10 +11036,8 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			return iNdEx, nil
 		case 1:
 			iNdEx += 8
-			return iNdEx, nil
 		case 2:
 			var length int
 			for shift := uint(0); ; shift += 7 {
@@ -11059,55 +11058,30 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 				return 0, ErrInvalidLengthGenerated
 			}
 			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthGenerated
-			}
-			return iNdEx, nil
 		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowGenerated
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipGenerated(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthGenerated
-				}
-			}
-			return iNdEx, nil
+			depth++
 		case 4:
-			return iNdEx, nil
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
 		case 5:
 			iNdEx += 4
-			return iNdEx, nil
 		default:
 			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
 		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
 	}
-	panic("unreachable")
+	return 0, io.ErrUnexpectedEOF
 }
 
 var (
-	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
 )
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go
index ec016fd3..ad989ad7 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go
@@ -252,7 +252,9 @@ func ResetObjectMetaForStatus(meta, existingMeta Object) {
 	meta.SetAnnotations(existingMeta.GetAnnotations())
 	meta.SetFinalizers(existingMeta.GetFinalizers())
 	meta.SetOwnerReferences(existingMeta.GetOwnerReferences())
-	meta.SetManagedFields(existingMeta.GetManagedFields())
+	// Keep the current managedFields: they have been modified to track the
+	// fields changed by this status update and must not be overwritten.
+	// meta.SetManagedFields(existingMeta.GetManagedFields())
 }
 
 // MarshalJSON implements json.Marshaler
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go
index a7b8aa34..c1a07717 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go
@@ -53,15 +53,6 @@ var scheme = runtime.NewScheme()
 // ParameterCodec knows about query parameters used with the meta v1 API spec.
 var ParameterCodec = runtime.NewParameterCodec(scheme)
 
-func addEventConversionFuncs(scheme *runtime.Scheme) error {
-	return scheme.AddConversionFuncs(
-		Convert_v1_WatchEvent_To_watch_Event,
-		Convert_v1_InternalEvent_To_v1_WatchEvent,
-		Convert_watch_Event_To_v1_WatchEvent,
-		Convert_v1_WatchEvent_To_v1_InternalEvent,
-	)
-}
-
 var optionsTypes = []runtime.Object{
 	&ListOptions{},
 	&ExportOptions{},
@@ -90,10 +81,8 @@ func AddToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion)
 		&APIResourceList{},
 	)
 
-	utilruntime.Must(addEventConversionFuncs(scheme))
-
 	// register manually. This usually goes through the SchemeBuilder, which we cannot use here.
-	utilruntime.Must(AddConversionFuncs(scheme))
+	utilruntime.Must(RegisterConversions(scheme))
 	utilruntime.Must(RegisterDefaults(scheme))
 }
 
@@ -106,9 +95,7 @@ func AddMetaToScheme(scheme *runtime.Scheme) error {
 		&PartialObjectMetadataList{},
 	)
 
-	return scheme.AddConversionFuncs(
-		Convert_Slice_string_To_v1_IncludeObjectPolicy,
-	)
+	return nil
 }
 
 func init() {
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go
index fe510ed9..4a1d89cf 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go
@@ -153,6 +153,16 @@ func (t Time) MarshalJSON() ([]byte, error) {
 	return buf, nil
 }
 
+// ToUnstructured implements the value.UnstructuredConverter interface.
+func (t Time) ToUnstructured() interface{} {
+	if t.IsZero() {
+		return nil
+	}
+	buf := make([]byte, 0, len(time.RFC3339))
+	buf = t.UTC().AppendFormat(buf, time.RFC3339)
+	return string(buf)
+}
+
 // OpenAPISchemaType is used by the kube-openapi generator when constructing
 // the OpenAPI spec of this type.
 //
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructuredscheme/scheme.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructuredscheme/scheme.go
new file mode 100644
index 00000000..3d7b6f05
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructuredscheme/scheme.go
@@ -0,0 +1,133 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unstructuredscheme
+
+import (
+	"fmt"
+
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/apimachinery/pkg/runtime/serializer/json"
+	"k8s.io/apimachinery/pkg/runtime/serializer/versioning"
+)
+
+var (
+	scheme = runtime.NewScheme()
+	codecs = serializer.NewCodecFactory(scheme)
+)
+
+// NewUnstructuredNegotiatedSerializer returns a simple, negotiated serializer
+func NewUnstructuredNegotiatedSerializer() runtime.NegotiatedSerializer {
+	return unstructuredNegotiatedSerializer{
+		scheme:  scheme,
+		typer:   NewUnstructuredObjectTyper(),
+		creator: NewUnstructuredCreator(),
+	}
+}
+
+type unstructuredNegotiatedSerializer struct {
+	scheme  *runtime.Scheme
+	typer   runtime.ObjectTyper
+	creator runtime.ObjectCreater
+}
+
+func (s unstructuredNegotiatedSerializer) SupportedMediaTypes() []runtime.SerializerInfo {
+	return []runtime.SerializerInfo{
+		{
+			MediaType:        "application/json",
+			MediaTypeType:    "application",
+			MediaTypeSubType: "json",
+			EncodesAsText:    true,
+			Serializer:       json.NewSerializer(json.DefaultMetaFactory, s.creator, s.typer, false),
+			PrettySerializer: json.NewSerializer(json.DefaultMetaFactory, s.creator, s.typer, true),
+			StreamSerializer: &runtime.StreamSerializerInfo{
+				EncodesAsText: true,
+				Serializer:    json.NewSerializer(json.DefaultMetaFactory, s.creator, s.typer, false),
+				Framer:        json.Framer,
+			},
+		},
+		{
+			MediaType:        "application/yaml",
+			MediaTypeType:    "application",
+			MediaTypeSubType: "yaml",
+			EncodesAsText:    true,
+			Serializer:       json.NewYAMLSerializer(json.DefaultMetaFactory, s.creator, s.typer),
+		},
+	}
+}
+
+func (s unstructuredNegotiatedSerializer) EncoderForVersion(encoder runtime.Encoder, gv runtime.GroupVersioner) runtime.Encoder {
+	return versioning.NewDefaultingCodecForScheme(s.scheme, encoder, nil, gv, nil)
+}
+
+func (s unstructuredNegotiatedSerializer) DecoderToVersion(decoder runtime.Decoder, gv runtime.GroupVersioner) runtime.Decoder {
+	return versioning.NewDefaultingCodecForScheme(s.scheme, nil, decoder, nil, gv)
+}
+
+type unstructuredObjectTyper struct {
+}
+
+// NewUnstructuredObjectTyper returns an object typer that can deal with unstructured things
+func NewUnstructuredObjectTyper() runtime.ObjectTyper {
+	return unstructuredObjectTyper{}
+}
+
+func (t unstructuredObjectTyper) ObjectKinds(obj runtime.Object) ([]schema.GroupVersionKind, bool, error) {
+	// Delegate for things other than Unstructured.
+	if _, ok := obj.(runtime.Unstructured); !ok {
+		return nil, false, fmt.Errorf("cannot type %T", obj)
+	}
+	gvk := obj.GetObjectKind().GroupVersionKind()
+	if len(gvk.Kind) == 0 {
+		return nil, false, runtime.NewMissingKindErr("object has no kind field ")
+	}
+	if len(gvk.Version) == 0 {
+		return nil, false, runtime.NewMissingVersionErr("object has no apiVersion field")
+	}
+
+	return []schema.GroupVersionKind{obj.GetObjectKind().GroupVersionKind()}, false, nil
+}
+
+func (t unstructuredObjectTyper) Recognizes(gvk schema.GroupVersionKind) bool {
+	return true
+}
+
+type unstructuredCreator struct{}
+
+// NewUnstructuredCreator returns a simple object creator that always returns an unstructured
+func NewUnstructuredCreator() runtime.ObjectCreater {
+	return unstructuredCreator{}
+}
+
+func (c unstructuredCreator) New(kind schema.GroupVersionKind) (runtime.Object, error) {
+	ret := &unstructured.Unstructured{}
+	ret.SetGroupVersionKind(kind)
+	return ret, nil
+}
+
+type unstructuredDefaulter struct {
+}
+
+// NewUnstructuredDefaulter returns a defaulter suitable for unstructured types that doesn't default anything
+func NewUnstructuredDefaulter() runtime.ObjectDefaulter {
+	return unstructuredDefaulter{}
+}
+
+func (d unstructuredDefaulter) Default(in runtime.Object) {
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go
new file mode 100644
index 00000000..2743793d
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go
@@ -0,0 +1,186 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+	"fmt"
+	"unicode"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/apimachinery/pkg/util/validation"
+	"k8s.io/apimachinery/pkg/util/validation/field"
+)
+
+func ValidateLabelSelector(ps *metav1.LabelSelector, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if ps == nil {
+		return allErrs
+	}
+	allErrs = append(allErrs, ValidateLabels(ps.MatchLabels, fldPath.Child("matchLabels"))...)
+	for i, expr := range ps.MatchExpressions {
+		allErrs = append(allErrs, ValidateLabelSelectorRequirement(expr, fldPath.Child("matchExpressions").Index(i))...)
+	}
+	return allErrs
+}
+
+func ValidateLabelSelectorRequirement(sr metav1.LabelSelectorRequirement, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	switch sr.Operator {
+	case metav1.LabelSelectorOpIn, metav1.LabelSelectorOpNotIn:
+		if len(sr.Values) == 0 {
+			allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must be specified when `operator` is 'In' or 'NotIn'"))
+		}
+	case metav1.LabelSelectorOpExists, metav1.LabelSelectorOpDoesNotExist:
+		if len(sr.Values) > 0 {
+			allErrs = append(allErrs, field.Forbidden(fldPath.Child("values"), "may not be specified when `operator` is 'Exists' or 'DoesNotExist'"))
+		}
+	default:
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), sr.Operator, "not a valid selector operator"))
+	}
+	allErrs = append(allErrs, ValidateLabelName(sr.Key, fldPath.Child("key"))...)
+	return allErrs
+}
+
+// ValidateLabelName validates that the label name is correctly defined.
+func ValidateLabelName(labelName string, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	for _, msg := range validation.IsQualifiedName(labelName) {
+		allErrs = append(allErrs, field.Invalid(fldPath, labelName, msg))
+	}
+	return allErrs
+}
+
+// ValidateLabels validates that a set of labels are correctly defined.
+func ValidateLabels(labels map[string]string, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	for k, v := range labels {
+		allErrs = append(allErrs, ValidateLabelName(k, fldPath)...)
+		for _, msg := range validation.IsValidLabelValue(v) {
+			allErrs = append(allErrs, field.Invalid(fldPath, v, msg))
+		}
+	}
+	return allErrs
+}
+
+func ValidateDeleteOptions(options *metav1.DeleteOptions) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if options.OrphanDependents != nil && options.PropagationPolicy != nil {
+		allErrs = append(allErrs, field.Invalid(field.NewPath("propagationPolicy"), options.PropagationPolicy, "orphanDependents and deletionPropagation cannot be both set"))
+	}
+	if options.PropagationPolicy != nil &&
+		*options.PropagationPolicy != metav1.DeletePropagationForeground &&
+		*options.PropagationPolicy != metav1.DeletePropagationBackground &&
+		*options.PropagationPolicy != metav1.DeletePropagationOrphan {
+		allErrs = append(allErrs, field.NotSupported(field.NewPath("propagationPolicy"), options.PropagationPolicy, []string{string(metav1.DeletePropagationForeground), string(metav1.DeletePropagationBackground), string(metav1.DeletePropagationOrphan), "nil"}))
+	}
+	allErrs = append(allErrs, ValidateDryRun(field.NewPath("dryRun"), options.DryRun)...)
+	return allErrs
+}
+
+func ValidateCreateOptions(options *metav1.CreateOptions) field.ErrorList {
+	return append(
+		ValidateFieldManager(options.FieldManager, field.NewPath("fieldManager")),
+		ValidateDryRun(field.NewPath("dryRun"), options.DryRun)...,
+	)
+}
+
+func ValidateUpdateOptions(options *metav1.UpdateOptions) field.ErrorList {
+	return append(
+		ValidateFieldManager(options.FieldManager, field.NewPath("fieldManager")),
+		ValidateDryRun(field.NewPath("dryRun"), options.DryRun)...,
+	)
+}
+
+func ValidatePatchOptions(options *metav1.PatchOptions, patchType types.PatchType) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if patchType != types.ApplyPatchType {
+		if options.Force != nil {
+			allErrs = append(allErrs, field.Forbidden(field.NewPath("force"), "may not be specified for non-apply patch"))
+		}
+	} else {
+		if options.FieldManager == "" {
+			// This field is defaulted to "kubectl" by kubectl, but HAS TO be explicitly set by controllers.
+			allErrs = append(allErrs, field.Required(field.NewPath("fieldManager"), "is required for apply patch"))
+		}
+	}
+	allErrs = append(allErrs, ValidateFieldManager(options.FieldManager, field.NewPath("fieldManager"))...)
+	allErrs = append(allErrs, ValidateDryRun(field.NewPath("dryRun"), options.DryRun)...)
+	return allErrs
+}
+
+var FieldManagerMaxLength = 128
+
+// ValidateFieldManager validates that the fieldManager is the proper length and
+// only has printable characters.
+func ValidateFieldManager(fieldManager string, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+	// the field cannot be set as a `*string`, so an empty string ("") is
+	// considered as not set and is defaulted by the rest of the process
+	// (unless apply is used, in which case it is required).
+	if len(fieldManager) > FieldManagerMaxLength {
+		allErrs = append(allErrs, field.TooLong(fldPath, fieldManager, FieldManagerMaxLength))
+	}
+	// Verify that all characters are printable.
+	for i, r := range fieldManager {
+		if !unicode.IsPrint(r) {
+			allErrs = append(allErrs, field.Invalid(fldPath, fieldManager, fmt.Sprintf("invalid character %#U (at position %d)", r, i)))
+		}
+	}
+
+	return allErrs
+}
+
+var allowedDryRunValues = sets.NewString(metav1.DryRunAll)
+
+// ValidateDryRun validates that a dryRun query param only contains allowed values.
+func ValidateDryRun(fldPath *field.Path, dryRun []string) field.ErrorList {
+	allErrs := field.ErrorList{}
+	if !allowedDryRunValues.HasAll(dryRun...) {
+		allErrs = append(allErrs, field.NotSupported(fldPath, dryRun, allowedDryRunValues.List()))
+	}
+	return allErrs
+}
+
+const UninitializedStatusUpdateErrorMsg string = `must not update status when the object is uninitialized`
+
+// ValidateTableOptions returns any invalid flags on TableOptions.
+func ValidateTableOptions(opts *metav1.TableOptions) field.ErrorList {
+	var allErrs field.ErrorList
+	switch opts.IncludeObject {
+	case metav1.IncludeMetadata, metav1.IncludeNone, metav1.IncludeObject, "":
+	default:
+		allErrs = append(allErrs, field.Invalid(field.NewPath("includeObject"), opts.IncludeObject, "must be 'Metadata', 'Object', 'None', or empty"))
+	}
+	return allErrs
+}
+
+func ValidateManagedFields(fieldsList []metav1.ManagedFieldsEntry, fldPath *field.Path) field.ErrorList {
+	var allErrs field.ErrorList
+	for _, fields := range fieldsList {
+		switch fields.Operation {
+		case metav1.ManagedFieldsOperationApply, metav1.ManagedFieldsOperationUpdate:
+		default:
+			allErrs = append(allErrs, field.Invalid(fldPath.Child("operation"), fields.Operation, "must be `Apply` or `Update`"))
+		}
+		if fields.FieldsType != "FieldsV1" {
+			allErrs = append(allErrs, field.Invalid(fldPath.Child("fieldsType"), fields.FieldsType, "must be `FieldsV1`"))
+		}
+	}
+	return allErrs
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go
index 5fae30ae..cd5fc902 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go
@@ -42,7 +42,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 func (m *PartialObjectMetadataList) Reset()      { *m = PartialObjectMetadataList{} }
 func (*PartialObjectMetadataList) ProtoMessage() {}
@@ -81,28 +81,27 @@ func init() {
 }
 
 var fileDescriptor_90ec10f86b91f9a8 = []byte{
-	// 321 bytes of a gzipped FileDescriptorProto
+	// 317 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0x41, 0x4b, 0xf3, 0x30,
-	0x18, 0xc7, 0x9b, 0xf7, 0x65, 0x38, 0x3a, 0x04, 0xd9, 0x69, 0xee, 0x90, 0x0d, 0x4f, 0xf3, 0xb0,
-	0x84, 0x0d, 0x11, 0xc1, 0xdb, 0x6e, 0x82, 0xa2, 0xec, 0x28, 0x1e, 0x4c, 0xbb, 0xc7, 0x2e, 0xd6,
-	0x34, 0x25, 0x79, 0x3a, 0xf0, 0xe6, 0x47, 0xf0, 0x63, 0xed, 0xb8, 0xe3, 0x40, 0x18, 0xae, 0x7e,
-	0x11, 0x49, 0x57, 0x45, 0xa6, 0x62, 0x6f, 0x79, 0xfe, 0xe1, 0xf7, 0xcb, 0x3f, 0x89, 0x3f, 0x8e,
-	0x4f, 0x2c, 0x93, 0x9a, 0xc7, 0x59, 0x00, 0x26, 0x01, 0x04, 0xcb, 0x67, 0x90, 0x4c, 0xb4, 0xe1,
-	0xe5, 0x86, 0x48, 0xa5, 0x12, 0xe1, 0x54, 0x26, 0x60, 0x1e, 0x79, 0x1a, 0x47, 0x2e, 0xb0, 0x5c,
-	0x01, 0x0a, 0x3e, 0x1b, 0x04, 0x80, 0x62, 0xc0, 0x23, 0x48, 0xc0, 0x08, 0x84, 0x09, 0x4b, 0x8d,
-	0x46, 0xdd, 0x3c, 0xdc, 0xa0, 0xec, 0x2b, 0xca, 0xd2, 0x38, 0x72, 0x81, 0x65, 0x0e, 0x65, 0x25,
-	0xda, 0xee, 0x47, 0x12, 0xa7, 0x59, 0xc0, 0x42, 0xad, 0x78, 0xa4, 0x23, 0xcd, 0x0b, 0x43, 0x90,
-	0xdd, 0x15, 0x53, 0x31, 0x14, 0xab, 0x8d, 0xb9, 0x7d, 0x54, 0xa5, 0xd4, 0x76, 0x9f, 0xf6, 0xaf,
-	0x57, 0x31, 0x59, 0x82, 0x52, 0xc1, 0x37, 0xe0, 0xf8, 0x2f, 0xc0, 0x86, 0x53, 0x50, 0x62, 0x9b,
-	0x3b, 0x78, 0x21, 0xfe, 0xfe, 0x95, 0x30, 0x28, 0xc5, 0xc3, 0x65, 0x70, 0x0f, 0x21, 0x5e, 0x00,
-	0x8a, 0x89, 0x40, 0x71, 0x2e, 0x2d, 0x36, 0x6f, 0xfc, 0xba, 0x2a, 0xe7, 0xd6, 0xbf, 0x2e, 0xe9,
-	0x35, 0x86, 0x8c, 0x55, 0x79, 0x29, 0xe6, 0x68, 0x67, 0x1a, 0xed, 0xcd, 0x57, 0x1d, 0x2f, 0x5f,
-	0x75, 0xea, 0x1f, 0xc9, 0xf8, 0xd3, 0xd8, 0xbc, 0xf5, 0x6b, 0x12, 0x41, 0xd9, 0x16, 0xe9, 0xfe,
-	0xef, 0x35, 0x86, 0xa7, 0xd5, 0xd4, 0x3f, 0xb6, 0x1d, 0xed, 0x96, 0xe7, 0xd4, 0xce, 0x9c, 0x71,
-	0xbc, 0x11, 0x8f, 0xfa, 0xf3, 0x35, 0xf5, 0x16, 0x6b, 0xea, 0x2d, 0xd7, 0xd4, 0x7b, 0xca, 0x29,
-	0x99, 0xe7, 0x94, 0x2c, 0x72, 0x4a, 0x96, 0x39, 0x25, 0xaf, 0x39, 0x25, 0xcf, 0x6f, 0xd4, 0xbb,
-	0xde, 0x29, 0xbf, 0xf6, 0x3d, 0x00, 0x00, 0xff, 0xff, 0xc6, 0x7e, 0x00, 0x08, 0x5a, 0x02, 0x00,
-	0x00,
+	0x1c, 0xc6, 0x9b, 0xf7, 0x65, 0x38, 0x3a, 0x04, 0xd9, 0x69, 0xee, 0x90, 0x0d, 0x4f, 0xf3, 0xb0,
+	0x84, 0x0d, 0x11, 0xc1, 0xdb, 0x6e, 0x82, 0xa2, 0xec, 0x28, 0x1e, 0x4c, 0xbb, 0xbf, 0x5d, 0xac,
+	0x69, 0x4a, 0xf2, 0xef, 0xc0, 0x9b, 0x1f, 0xc1, 0x8f, 0xb5, 0xe3, 0x8e, 0x03, 0x61, 0xb8, 0xf8,
+	0x45, 0x24, 0x5d, 0x15, 0x19, 0x0a, 0xbb, 0xf5, 0x79, 0xca, 0xef, 0x97, 0x27, 0x24, 0x1c, 0xa7,
+	0x67, 0x96, 0x49, 0xcd, 0xd3, 0x22, 0x02, 0x93, 0x01, 0x82, 0xe5, 0x33, 0xc8, 0x26, 0xda, 0xf0,
+	0xea, 0x87, 0xc8, 0xa5, 0x12, 0xf1, 0x54, 0x66, 0x60, 0x9e, 0x79, 0x9e, 0x26, 0xbe, 0xb0, 0x5c,
+	0x01, 0x0a, 0x3e, 0x1b, 0x44, 0x80, 0x62, 0xc0, 0x13, 0xc8, 0xc0, 0x08, 0x84, 0x09, 0xcb, 0x8d,
+	0x46, 0xdd, 0x3c, 0xde, 0xa0, 0xec, 0x27, 0xca, 0xf2, 0x34, 0xf1, 0x85, 0x65, 0x1e, 0x65, 0x15,
+	0xda, 0xee, 0x27, 0x12, 0xa7, 0x45, 0xc4, 0x62, 0xad, 0x78, 0xa2, 0x13, 0xcd, 0x4b, 0x43, 0x54,
+	0x3c, 0x94, 0xa9, 0x0c, 0xe5, 0xd7, 0xc6, 0xdc, 0x3e, 0xd9, 0x65, 0xd4, 0xf6, 0x9e, 0xf6, 0xe9,
+	0x5f, 0x94, 0x29, 0x32, 0x94, 0x0a, 0xb8, 0x8d, 0xa7, 0xa0, 0xc4, 0x36, 0x77, 0xf4, 0x46, 0xc2,
+	0xc3, 0x1b, 0x61, 0x50, 0x8a, 0xa7, 0xeb, 0xe8, 0x11, 0x62, 0xbc, 0x02, 0x14, 0x13, 0x81, 0xe2,
+	0x52, 0x5a, 0x6c, 0xde, 0x85, 0x75, 0x55, 0xe5, 0xd6, 0xbf, 0x2e, 0xe9, 0x35, 0x86, 0x8c, 0xed,
+	0x72, 0x71, 0xe6, 0x69, 0x6f, 0x1a, 0x1d, 0xcc, 0x57, 0x9d, 0xc0, 0xad, 0x3a, 0xf5, 0xaf, 0x66,
+	0xfc, 0x6d, 0x6c, 0xde, 0x87, 0x35, 0x89, 0xa0, 0x6c, 0x8b, 0x74, 0xff, 0xf7, 0x1a, 0xc3, 0xf3,
+	0xdd, 0xd4, 0xbf, 0xae, 0x1d, 0xed, 0x57, 0xe7, 0xd4, 0x2e, 0xbc, 0x71, 0xbc, 0x11, 0x8f, 0xfa,
+	0xf3, 0x35, 0x0d, 0x16, 0x6b, 0x1a, 0x2c, 0xd7, 0x34, 0x78, 0x71, 0x94, 0xcc, 0x1d, 0x25, 0x0b,
+	0x47, 0xc9, 0xd2, 0x51, 0xf2, 0xee, 0x28, 0x79, 0xfd, 0xa0, 0xc1, 0xed, 0x5e, 0xf5, 0x52, 0x9f,
+	0x01, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x82, 0x5b, 0x80, 0x29, 0x02, 0x00, 0x00,
 }
 
 func (m *PartialObjectMetadataList) Marshal() (dAtA []byte, err error) {
@@ -333,6 +332,7 @@ func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error {
 func skipGenerated(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
+	depth := 0
 	for iNdEx < l {
 		var wire uint64
 		for shift := uint(0); ; shift += 7 {
@@ -364,10 +364,8 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			return iNdEx, nil
 		case 1:
 			iNdEx += 8
-			return iNdEx, nil
 		case 2:
 			var length int
 			for shift := uint(0); ; shift += 7 {
@@ -388,55 +386,30 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 				return 0, ErrInvalidLengthGenerated
 			}
 			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthGenerated
-			}
-			return iNdEx, nil
 		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowGenerated
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipGenerated(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthGenerated
-				}
-			}
-			return iNdEx, nil
+			depth++
 		case 4:
-			return iNdEx, nil
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
 		case 5:
 			iNdEx += 4
-			return iNdEx, nil
 		default:
 			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
 		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
 	}
-	panic("unreachable")
+	return 0, io.ErrUnexpectedEOF
 }
 
 var (
-	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
 )
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto
index 19606666..59ce7437 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto
@@ -22,7 +22,6 @@ syntax = 'proto2';
 package k8s.io.apimachinery.pkg.apis.meta.v1beta1;
 
 import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
-import "k8s.io/apimachinery/pkg/runtime/generated.proto";
 import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
 
 // Package-wide variables from generator "generated".
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go
index 4b4acd72..8d11399f 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go
@@ -19,7 +19,6 @@ package v1beta1
 import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
-	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 )
 
 // GroupName is the group name for this API.
@@ -33,12 +32,6 @@ func Kind(kind string) schema.GroupKind {
 	return SchemeGroupVersion.WithKind(kind).GroupKind()
 }
 
-// scheme is the registry for the common types that adhere to the meta v1beta1 API spec.
-var scheme = runtime.NewScheme()
-
-// ParameterCodec knows about query parameters used with the meta v1beta1 API spec.
-var ParameterCodec = runtime.NewParameterCodec(scheme)
-
 // AddMetaToScheme registers base meta types into schemas.
 func AddMetaToScheme(scheme *runtime.Scheme) error {
 	scheme.AddKnownTypes(SchemeGroupVersion,
@@ -48,14 +41,5 @@ func AddMetaToScheme(scheme *runtime.Scheme) error {
 		&PartialObjectMetadataList{},
 	)
 
-	return scheme.AddConversionFuncs(
-		Convert_Slice_string_To_v1beta1_IncludeObjectPolicy,
-	)
-}
-
-func init() {
-	utilruntime.Must(AddMetaToScheme(scheme))
-
-	// register manually. This usually goes through the SchemeBuilder, which we cannot use here.
-	utilruntime.Must(RegisterDefaults(scheme))
+	return nil
 }
diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/converter.go b/vendor/k8s.io/apimachinery/pkg/conversion/converter.go
index bc615dc3..2d7c8bd1 100644
--- a/vendor/k8s.io/apimachinery/pkg/conversion/converter.go
+++ b/vendor/k8s.io/apimachinery/pkg/conversion/converter.go
@@ -54,7 +54,8 @@ type Converter struct {
 	generatedConversionFuncs ConversionFuncs
 
 	// Set of conversions that should be treated as a no-op
-	ignoredConversions map[typePair]struct{}
+	ignoredConversions        map[typePair]struct{}
+	ignoredUntypedConversions map[typePair]struct{}
 
 	// This is a map from a source field type and name, to a list of destination
 	// field type and name.
@@ -83,17 +84,23 @@ type Converter struct {
 // NewConverter creates a new Converter object.
 func NewConverter(nameFn NameFunc) *Converter {
 	c := &Converter{
-		conversionFuncs:          NewConversionFuncs(),
-		generatedConversionFuncs: NewConversionFuncs(),
-		ignoredConversions:       make(map[typePair]struct{}),
-		nameFunc:                 nameFn,
-		structFieldDests:         make(map[typeNamePair][]typeNamePair),
-		structFieldSources:       make(map[typeNamePair][]typeNamePair),
+		conversionFuncs:           NewConversionFuncs(),
+		generatedConversionFuncs:  NewConversionFuncs(),
+		ignoredConversions:        make(map[typePair]struct{}),
+		ignoredUntypedConversions: make(map[typePair]struct{}),
+		nameFunc:                  nameFn,
+		structFieldDests:          make(map[typeNamePair][]typeNamePair),
+		structFieldSources:        make(map[typeNamePair][]typeNamePair),
 
 		inputFieldMappingFuncs: make(map[reflect.Type]FieldMappingFunc),
 		inputDefaultFlags:      make(map[reflect.Type]FieldMatchingFlags),
 	}
-	c.RegisterConversionFunc(Convert_Slice_byte_To_Slice_byte)
+	c.RegisterUntypedConversionFunc(
+		(*[]byte)(nil), (*[]byte)(nil),
+		func(a, b interface{}, s Scope) error {
+			return Convert_Slice_byte_To_Slice_byte(a.(*[]byte), b.(*[]byte), s)
+		},
+	)
 	return c
 }
 
@@ -131,10 +138,6 @@ type Scope interface {
 	// parameters, you'll run out of stack space before anything useful happens.
 	Convert(src, dest interface{}, flags FieldMatchingFlags) error
 
-	// DefaultConvert performs the default conversion, without calling a conversion func
-	// on the current stack frame. This makes it safe to call from a conversion func.
-	DefaultConvert(src, dest interface{}, flags FieldMatchingFlags) error
-
 	// SrcTags and DestTags contain the struct tags that src and dest had, respectively.
 	// If the enclosing object was not a struct, then these will contain no tags, of course.
 	SrcTag() reflect.StructTag
@@ -153,31 +156,14 @@ type FieldMappingFunc func(key string, sourceTag, destTag reflect.StructTag) (so
 
 func NewConversionFuncs() ConversionFuncs {
 	return ConversionFuncs{
-		fns:     make(map[typePair]reflect.Value),
 		untyped: make(map[typePair]ConversionFunc),
 	}
 }
 
 type ConversionFuncs struct {
-	fns     map[typePair]reflect.Value
 	untyped map[typePair]ConversionFunc
 }
 
-// Add adds the provided conversion functions to the lookup table - they must have the signature
-// `func(type1, type2, Scope) error`. Functions are added in the order passed and will override
-// previously registered pairs.
-func (c ConversionFuncs) Add(fns ...interface{}) error {
-	for _, fn := range fns {
-		fv := reflect.ValueOf(fn)
-		ft := fv.Type()
-		if err := verifyConversionFunctionSignature(ft); err != nil {
-			return err
-		}
-		c.fns[typePair{ft.In(0).Elem(), ft.In(1).Elem()}] = fv
-	}
-	return nil
-}
-
 // AddUntyped adds the provided conversion function to the lookup table for the types that are
 // supplied as a and b. a and b must be pointers or an error is returned. This method overwrites
 // previously defined functions.
@@ -197,12 +183,6 @@ func (c ConversionFuncs) AddUntyped(a, b interface{}, fn ConversionFunc) error {
 // both other and c, with other conversions taking precedence.
 func (c ConversionFuncs) Merge(other ConversionFuncs) ConversionFuncs {
 	merged := NewConversionFuncs()
-	for k, v := range c.fns {
-		merged.fns[k] = v
-	}
-	for k, v := range other.fns {
-		merged.fns[k] = v
-	}
 	for k, v := range c.untyped {
 		merged.untyped[k] = v
 	}
@@ -290,12 +270,6 @@ func (s *scope) Convert(src, dest interface{}, flags FieldMatchingFlags) error {
 	return s.converter.Convert(src, dest, flags, s.meta)
 }
 
-// DefaultConvert continues a conversion, performing a default conversion (no conversion func)
-// for the current stack frame.
-func (s *scope) DefaultConvert(src, dest interface{}, flags FieldMatchingFlags) error {
-	return s.converter.DefaultConvert(src, dest, flags, s.meta)
-}
-
 // SrcTag returns the tag of the struct containing the current source item, if any.
 func (s *scope) SrcTag() reflect.StructTag {
 	return s.srcStack.top().tag
@@ -360,29 +334,6 @@ func verifyConversionFunctionSignature(ft reflect.Type) error {
 	return nil
 }
 
-// RegisterConversionFunc registers a conversion func with the
-// Converter. conversionFunc must take three parameters: a pointer to the input
-// type, a pointer to the output type, and a conversion.Scope (which should be
-// used if recursive conversion calls are desired).  It must return an error.
-//
-// Example:
-// c.RegisterConversionFunc(
-//         func(in *Pod, out *v1.Pod, s Scope) error {
-//                 // conversion logic...
-//                 return nil
-//          })
-// DEPRECATED: Will be removed in favor of RegisterUntypedConversionFunc
-func (c *Converter) RegisterConversionFunc(conversionFunc interface{}) error {
-	return c.conversionFuncs.Add(conversionFunc)
-}
-
-// Similar to RegisterConversionFunc, but registers conversion function that were
-// automatically generated.
-// DEPRECATED: Will be removed in favor of RegisterGeneratedUntypedConversionFunc
-func (c *Converter) RegisterGeneratedConversionFunc(conversionFunc interface{}) error {
-	return c.generatedConversionFuncs.Add(conversionFunc)
-}
-
 // RegisterUntypedConversionFunc registers a function that converts between a and b by passing objects of those
 // types to the provided function. The function *must* accept objects of a and b - this machinery will not enforce
 // any other guarantee.
@@ -409,6 +360,7 @@ func (c *Converter) RegisterIgnoredConversion(from, to interface{}) error {
 		return fmt.Errorf("expected pointer arg for 'to' param 1, got: %v", typeTo)
 	}
 	c.ignoredConversions[typePair{typeFrom.Elem(), typeTo.Elem()}] = struct{}{}
+	c.ignoredUntypedConversions[typePair{typeFrom, typeTo}] = struct{}{}
 	return nil
 }
 
@@ -470,18 +422,6 @@ func (c *Converter) Convert(src, dest interface{}, flags FieldMatchingFlags, met
 	return c.doConversion(src, dest, flags, meta, c.convert)
 }
 
-// DefaultConvert will translate src to dest if it knows how. Both must be pointers.
-// No conversion func is used. If the default copying mechanism
-// doesn't work on this type pair, an error will be returned.
-// Read the comments on the various FieldMatchingFlags constants to understand
-// what the 'flags' parameter does.
-// 'meta' is given to allow you to pass information to conversion functions,
-// it is not used by DefaultConvert() other than storing it in the scope.
-// Not safe for objects with cyclic references!
-func (c *Converter) DefaultConvert(src, dest interface{}, flags FieldMatchingFlags, meta *Meta) error {
-	return c.doConversion(src, dest, flags, meta, c.defaultConvert)
-}
-
 type conversionFunc func(sv, dv reflect.Value, scope *scope) error
 
 func (c *Converter) doConversion(src, dest interface{}, flags FieldMatchingFlags, meta *Meta, f conversionFunc) error {
@@ -491,6 +431,11 @@ func (c *Converter) doConversion(src, dest interface{}, flags FieldMatchingFlags
 		flags:     flags,
 		meta:      meta,
 	}
+
+	// ignore conversions of this type
+	if _, ok := c.ignoredUntypedConversions[pair]; ok {
+		return nil
+	}
 	if fn, ok := c.conversionFuncs.untyped[pair]; ok {
 		return fn(src, dest, scope)
 	}
@@ -517,33 +462,20 @@ func (c *Converter) doConversion(src, dest interface{}, flags FieldMatchingFlags
 	return f(sv, dv, scope)
 }
 
-// callCustom calls 'custom' with sv & dv. custom must be a conversion function.
-func (c *Converter) callCustom(sv, dv, custom reflect.Value, scope *scope) error {
-	if !sv.CanAddr() {
-		sv2 := reflect.New(sv.Type())
-		sv2.Elem().Set(sv)
-		sv = sv2
-	} else {
-		sv = sv.Addr()
-	}
+// callUntyped calls a predefined conversion func.
+func (c *Converter) callUntyped(sv, dv reflect.Value, f ConversionFunc, scope *scope) error {
 	if !dv.CanAddr() {
-		if !dv.CanSet() {
-			return scope.errorf("can't addr or set dest.")
-		}
-		dvOrig := dv
-		dv := reflect.New(dvOrig.Type())
-		defer func() { dvOrig.Set(dv) }()
+		return scope.errorf("can't addr dest")
+	}
+	var svPointer reflect.Value
+	if sv.CanAddr() {
+		svPointer = sv.Addr()
 	} else {
-		dv = dv.Addr()
+		svPointer = reflect.New(sv.Type())
+		svPointer.Elem().Set(sv)
 	}
-	args := []reflect.Value{sv, dv, reflect.ValueOf(scope)}
-	ret := custom.Call(args)[0].Interface()
-	// This convolution is necessary because nil interfaces won't convert
-	// to errors.
-	if ret == nil {
-		return nil
-	}
-	return ret.(error)
+	dvPointer := dv.Addr()
+	return f(svPointer.Interface(), dvPointer.Interface(), scope)
 }
 
 // convert recursively copies sv into dv, calling an appropriate conversion function if
@@ -561,27 +493,14 @@ func (c *Converter) convert(sv, dv reflect.Value, scope *scope) error {
 	}
 
 	// Convert sv to dv.
-	if fv, ok := c.conversionFuncs.fns[pair]; ok {
-		if c.Debug != nil {
-			c.Debug.Logf("Calling custom conversion of '%v' to '%v'", st, dt)
-		}
-		return c.callCustom(sv, dv, fv, scope)
+	pair = typePair{reflect.PtrTo(sv.Type()), reflect.PtrTo(dv.Type())}
+	if f, ok := c.conversionFuncs.untyped[pair]; ok {
+		return c.callUntyped(sv, dv, f, scope)
 	}
-	if fv, ok := c.generatedConversionFuncs.fns[pair]; ok {
-		if c.Debug != nil {
-			c.Debug.Logf("Calling generated conversion of '%v' to '%v'", st, dt)
-		}
-		return c.callCustom(sv, dv, fv, scope)
+	if f, ok := c.generatedConversionFuncs.untyped[pair]; ok {
+		return c.callUntyped(sv, dv, f, scope)
 	}
 
-	return c.defaultConvert(sv, dv, scope)
-}
-
-// defaultConvert recursively copies sv into dv. no conversion function is called
-// for the current stack frame (but conversion functions may be called for nested objects)
-func (c *Converter) defaultConvert(sv, dv reflect.Value, scope *scope) error {
-	dt, st := dv.Type(), sv.Type()
-
 	if !dv.CanSet() {
 		return scope.errorf("Cannot set dest. (Tried to deep copy something with unexported fields?)")
 	}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go b/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go
index 0947dce7..d04d701f 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go
@@ -53,14 +53,6 @@ func JSONKeyMapper(key string, sourceTag, destTag reflect.StructTag) (string, st
 	return key, key
 }
 
-// DefaultStringConversions are helpers for converting []string and string to real values.
-var DefaultStringConversions = []interface{}{
-	Convert_Slice_string_To_string,
-	Convert_Slice_string_To_int,
-	Convert_Slice_string_To_bool,
-	Convert_Slice_string_To_int64,
-}
-
 func Convert_Slice_string_To_string(in *[]string, out *string, s conversion.Scope) error {
 	if len(*in) == 0 {
 		*out = ""
@@ -178,3 +170,27 @@ func Convert_Slice_string_To_Pointer_int64(in *[]string, out **int64, s conversi
 	*out = &i
 	return nil
 }
+
+func RegisterStringConversions(s *Scheme) error {
+	if err := s.AddConversionFunc((*[]string)(nil), (*string)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_Slice_string_To_string(a.(*[]string), b.(*string), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*[]string)(nil), (*int)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_Slice_string_To_int(a.(*[]string), b.(*int), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*[]string)(nil), (*bool)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_Slice_string_To_bool(a.(*[]string), b.(*bool), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*[]string)(nil), (*int64)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_Slice_string_To_int64(a.(*[]string), b.(*int64), scope)
+	}); err != nil {
+		return err
+	}
+	return nil
+}
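RegisterStringConversions above replaces the removed DefaultStringConversions slice with explicit AddConversionFunc registrations. A minimal sketch of exercising one of those conversions through a Scheme, assuming NewScheme wires the registration in as shown later in this patch:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// NewScheme calls RegisterStringConversions, so []string -> int is available
	// without any further setup.
	s := runtime.NewScheme()

	in := []string{"42"}
	var out int
	if err := s.Convert(&in, &out, nil); err != nil {
		panic(err)
	}
	fmt.Println(out) // 42, parsed by Convert_Slice_string_To_int
}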
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go
index b3e8a53b..918d0831 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go
@@ -17,7 +17,6 @@ limitations under the License.
 package runtime
 
 import (
-	"bytes"
 	encodingjson "encoding/json"
 	"fmt"
 	"math"
@@ -32,6 +31,7 @@ import (
 	"k8s.io/apimachinery/pkg/conversion"
 	"k8s.io/apimachinery/pkg/util/json"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"sigs.k8s.io/structured-merge-diff/v3/value"
 
 	"k8s.io/klog"
 )
@@ -68,13 +68,8 @@ func newFieldsCache() *fieldsCache {
 }
 
 var (
-	marshalerType          = reflect.TypeOf(new(encodingjson.Marshaler)).Elem()
-	unmarshalerType        = reflect.TypeOf(new(encodingjson.Unmarshaler)).Elem()
 	mapStringInterfaceType = reflect.TypeOf(map[string]interface{}{})
 	stringType             = reflect.TypeOf(string(""))
-	int64Type              = reflect.TypeOf(int64(0))
-	float64Type            = reflect.TypeOf(float64(0))
-	boolType               = reflect.TypeOf(bool(false))
 	fieldCache             = newFieldsCache()
 
 	// DefaultUnstructuredConverter performs unstructured to Go typed object conversions.
@@ -208,13 +203,9 @@ func fromUnstructured(sv, dv reflect.Value) error {
 	}
 
 	// Check if the object has a custom JSON marshaller/unmarshaller.
-	if reflect.PtrTo(dt).Implements(unmarshalerType) {
-		data, err := json.Marshal(sv.Interface())
-		if err != nil {
-			return fmt.Errorf("error encoding %s to json: %v", st.String(), err)
-		}
-		unmarshaler := dv.Addr().Interface().(encodingjson.Unmarshaler)
-		return unmarshaler.UnmarshalJSON(data)
+	entry := value.TypeReflectEntryOf(dv.Type())
+	if entry.CanConvertFromUnstructured() {
+		return entry.FromUnstructured(sv, dv)
 	}
 
 	switch dt.Kind() {
@@ -256,6 +247,7 @@ func fieldInfoFromField(structType reflect.Type, field int) *fieldInfo {
 		for i := range items {
 			if items[i] == "omitempty" {
 				info.omitempty = true
+				break
 			}
 		}
 	}
@@ -483,112 +475,28 @@ func toUnstructuredViaJSON(obj interface{}, u *map[string]interface{}) error {
 	return json.Unmarshal(data, u)
 }
 
-var (
-	nullBytes  = []byte("null")
-	trueBytes  = []byte("true")
-	falseBytes = []byte("false")
-)
-
-func getMarshaler(v reflect.Value) (encodingjson.Marshaler, bool) {
-	// Check value receivers if v is not a pointer and pointer receivers if v is a pointer
-	if v.Type().Implements(marshalerType) {
-		return v.Interface().(encodingjson.Marshaler), true
-	}
-	// Check pointer receivers if v is not a pointer
-	if v.Kind() != reflect.Ptr && v.CanAddr() {
-		v = v.Addr()
-		if v.Type().Implements(marshalerType) {
-			return v.Interface().(encodingjson.Marshaler), true
-		}
-	}
-	return nil, false
-}
-
 func toUnstructured(sv, dv reflect.Value) error {
-	// Check if the object has a custom JSON marshaller/unmarshaller.
-	if marshaler, ok := getMarshaler(sv); ok {
-		if sv.Kind() == reflect.Ptr && sv.IsNil() {
-			// We're done - we don't need to store anything.
-			return nil
-		}
-
-		data, err := marshaler.MarshalJSON()
+	// Check if the type can convert itself to unstructured directly.
+	entry := value.TypeReflectEntryOf(sv.Type())
+	if entry.CanConvertToUnstructured() {
+		v, err := entry.ToUnstructured(sv)
 		if err != nil {
 			return err
 		}
-		switch {
-		case len(data) == 0:
-			return fmt.Errorf("error decoding from json: empty value")
-
-		case bytes.Equal(data, nullBytes):
-			// We're done - we don't need to store anything.
-
-		case bytes.Equal(data, trueBytes):
-			dv.Set(reflect.ValueOf(true))
-
-		case bytes.Equal(data, falseBytes):
-			dv.Set(reflect.ValueOf(false))
-
-		case data[0] == '"':
-			var result string
-			err := json.Unmarshal(data, &result)
-			if err != nil {
-				return fmt.Errorf("error decoding string from json: %v", err)
-			}
-			dv.Set(reflect.ValueOf(result))
-
-		case data[0] == '{':
-			result := make(map[string]interface{})
-			err := json.Unmarshal(data, &result)
-			if err != nil {
-				return fmt.Errorf("error decoding object from json: %v", err)
-			}
-			dv.Set(reflect.ValueOf(result))
-
-		case data[0] == '[':
-			result := make([]interface{}, 0)
-			err := json.Unmarshal(data, &result)
-			if err != nil {
-				return fmt.Errorf("error decoding array from json: %v", err)
-			}
-			dv.Set(reflect.ValueOf(result))
-
-		default:
-			var (
-				resultInt   int64
-				resultFloat float64
-				err         error
-			)
-			if err = json.Unmarshal(data, &resultInt); err == nil {
-				dv.Set(reflect.ValueOf(resultInt))
-			} else if err = json.Unmarshal(data, &resultFloat); err == nil {
-				dv.Set(reflect.ValueOf(resultFloat))
-			} else {
-				return fmt.Errorf("error decoding number from json: %v", err)
-			}
+		if v != nil {
+			dv.Set(reflect.ValueOf(v))
 		}
-
 		return nil
 	}
-
-	st, dt := sv.Type(), dv.Type()
+	st := sv.Type()
 	switch st.Kind() {
 	case reflect.String:
-		if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 {
-			dv.Set(reflect.New(stringType))
-		}
 		dv.Set(reflect.ValueOf(sv.String()))
 		return nil
 	case reflect.Bool:
-		if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 {
-			dv.Set(reflect.New(boolType))
-		}
 		dv.Set(reflect.ValueOf(sv.Bool()))
 		return nil
 	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 {
-			dv.Set(reflect.New(int64Type))
-		}
 		dv.Set(reflect.ValueOf(sv.Int()))
 		return nil
 	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
@@ -596,15 +504,9 @@ func toUnstructured(sv, dv reflect.Value) error {
 		if uVal > math.MaxInt64 {
 			return fmt.Errorf("unsigned value %d does not fit into int64 (overflow)", uVal)
 		}
-		if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 {
-			dv.Set(reflect.New(int64Type))
-		}
 		dv.Set(reflect.ValueOf(int64(uVal)))
 		return nil
 	case reflect.Float32, reflect.Float64:
-		if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 {
-			dv.Set(reflect.New(float64Type))
-		}
 		dv.Set(reflect.ValueOf(sv.Float()))
 		return nil
 	case reflect.Map:
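toUnstructured and fromUnstructured above now delegate custom JSON marshalling to sigs.k8s.io/structured-merge-diff's TypeReflectEntryOf instead of hand-rolled byte inspection. A small sketch of round-tripping a plain struct through the public DefaultUnstructuredConverter that this code backs; the Widget type is hypothetical:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
)

// Widget is a hypothetical typed object used only for this illustration.
type Widget struct {
	Name     string `json:"name"`
	Replicas int64  `json:"replicas,omitempty"`
}

func main() {
	in := &Widget{Name: "demo", Replicas: 3}

	// ToUnstructured is backed by the patched toUnstructured above.
	u, err := runtime.DefaultUnstructuredConverter.ToUnstructured(in)
	if err != nil {
		panic(err)
	}
	fmt.Println(u["name"], u["replicas"]) // demo 3

	// FromUnstructured goes through the patched fromUnstructured path.
	out := &Widget{}
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u, out); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", *out)
}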
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/embedded.go b/vendor/k8s.io/apimachinery/pkg/runtime/embedded.go
index db11eb8b..7251e65f 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/embedded.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/embedded.go
@@ -134,9 +134,16 @@ func Convert_runtime_RawExtension_To_runtime_Object(in *RawExtension, out *Objec
 	return nil
 }
 
-func DefaultEmbeddedConversions() []interface{} {
-	return []interface{}{
-		Convert_runtime_Object_To_runtime_RawExtension,
-		Convert_runtime_RawExtension_To_runtime_Object,
+func RegisterEmbeddedConversions(s *Scheme) error {
+	if err := s.AddConversionFunc((*Object)(nil), (*RawExtension)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_runtime_Object_To_runtime_RawExtension(a.(*Object), b.(*RawExtension), scope)
+	}); err != nil {
+		return err
 	}
+	if err := s.AddConversionFunc((*RawExtension)(nil), (*Object)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_runtime_RawExtension_To_runtime_Object(a.(*RawExtension), b.(*Object), scope)
+	}); err != nil {
+		return err
+	}
+	return nil
 }
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go
index af2f076b..07197181 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go
@@ -40,7 +40,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 func (m *RawExtension) Reset()      { *m = RawExtension{} }
 func (*RawExtension) ProtoMessage() {}
@@ -772,6 +772,7 @@ func (m *Unknown) Unmarshal(dAtA []byte) error {
 func skipGenerated(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
+	depth := 0
 	for iNdEx < l {
 		var wire uint64
 		for shift := uint(0); ; shift += 7 {
@@ -803,10 +804,8 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			return iNdEx, nil
 		case 1:
 			iNdEx += 8
-			return iNdEx, nil
 		case 2:
 			var length int
 			for shift := uint(0); ; shift += 7 {
@@ -827,55 +826,30 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 				return 0, ErrInvalidLengthGenerated
 			}
 			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthGenerated
-			}
-			return iNdEx, nil
 		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowGenerated
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipGenerated(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthGenerated
-				}
-			}
-			return iNdEx, nil
+			depth++
 		case 4:
-			return iNdEx, nil
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
 		case 5:
 			iNdEx += 4
-			return iNdEx, nil
 		default:
 			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
 		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
 	}
-	panic("unreachable")
+	return 0, io.ErrUnexpectedEOF
 }
 
 var (
-	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
 )
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go
index a7276649..29d3ac45 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go
@@ -36,7 +36,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 func init() {
 	proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto", fileDescriptor_0462724132518e0d)
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go
index fd37e293..4b739ec3 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go
@@ -102,10 +102,10 @@ func NewScheme() *Scheme {
 	}
 	s.converter = conversion.NewConverter(s.nameFunc)
 
-	utilruntime.Must(s.AddConversionFuncs(DefaultEmbeddedConversions()...))
+	// Enable a couple of default conversions by default.
+	utilruntime.Must(RegisterEmbeddedConversions(s))
+	utilruntime.Must(RegisterStringConversions(s))
 
-	// Enable map[string][]string conversions by default
-	utilruntime.Must(s.AddConversionFuncs(DefaultStringConversions...))
 	utilruntime.Must(s.RegisterInputDefaults(&map[string][]string{}, JSONKeyMapper, conversion.AllowDifferentFieldTypeNames|conversion.IgnoreMissingFields))
 	utilruntime.Must(s.RegisterInputDefaults(&url.Values{}, JSONKeyMapper, conversion.AllowDifferentFieldTypeNames|conversion.IgnoreMissingFields))
 	return s
@@ -308,45 +308,6 @@ func (s *Scheme) AddIgnoredConversionType(from, to interface{}) error {
 	return s.converter.RegisterIgnoredConversion(from, to)
 }
 
-// AddConversionFuncs adds functions to the list of conversion functions. The given
-// functions should know how to convert between two of your API objects, or their
-// sub-objects. We deduce how to call these functions from the types of their two
-// parameters; see the comment for Converter.Register.
-//
-// Note that, if you need to copy sub-objects that didn't change, you can use the
-// conversion.Scope object that will be passed to your conversion function.
-// Additionally, all conversions started by Scheme will set the SrcVersion and
-// DestVersion fields on the Meta object. Example:
-//
-// s.AddConversionFuncs(
-//	func(in *InternalObject, out *ExternalObject, scope conversion.Scope) error {
-//		// You can depend on Meta() being non-nil, and this being set to
-//		// the source version, e.g., ""
-//		s.Meta().SrcVersion
-//		// You can depend on this being set to the destination version,
-//		// e.g., "v1".
-//		s.Meta().DestVersion
-//		// Call scope.Convert to copy sub-fields.
-//		s.Convert(&in.SubFieldThatMoved, &out.NewLocation.NewName, 0)
-//		return nil
-//	},
-// )
-//
-// (For more detail about conversion functions, see Converter.Register's comment.)
-//
-// Also note that the default behavior, if you don't add a conversion function, is to
-// sanely copy fields that have the same names and same type names. It's OK if the
-// destination type has extra fields, but it must not remove any. So you only need to
-// add conversion functions for things with changed/removed fields.
-func (s *Scheme) AddConversionFuncs(conversionFuncs ...interface{}) error {
-	for _, f := range conversionFuncs {
-		if err := s.converter.RegisterConversionFunc(f); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
 // AddConversionFunc registers a function that converts between a and b by passing objects of those
 // types to the provided function. The function *must* accept objects of a and b - this machinery will not enforce
 // any other guarantee.
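With AddConversionFuncs removed above, callers register each conversion individually via AddConversionFunc, passing nil typed pointers to carry the type information. A hedged sketch of that migration for a hypothetical InternalObject/ExternalObject pair:

package main

import (
	"k8s.io/apimachinery/pkg/conversion"
	"k8s.io/apimachinery/pkg/runtime"
)

// InternalObject and ExternalObject are hypothetical types used only to
// illustrate the migration away from the removed AddConversionFuncs helper.
type InternalObject struct{ Name string }
type ExternalObject struct{ Name string }

func registerConversions(s *runtime.Scheme) error {
	// Previously: s.AddConversionFuncs(func(in *InternalObject, out *ExternalObject, sc conversion.Scope) error {...})
	// Now each pair is registered explicitly, with nil typed pointers carrying the types.
	return s.AddConversionFunc((*InternalObject)(nil), (*ExternalObject)(nil),
		func(a, b interface{}, scope conversion.Scope) error {
			b.(*ExternalObject).Name = a.(*InternalObject).Name
			return nil
		})
}

func main() {
	if err := registerConversions(runtime.NewScheme()); err != nil {
		panic(err)
	}
}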
diff --git a/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go b/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go
index 1689e62e..6cf13d83 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go
@@ -52,23 +52,26 @@ func (RealClock) Since(ts time.Time) time.Duration {
 	return time.Since(ts)
 }
 
-// Same as time.After(d).
+// After is the same as time.After(d).
 func (RealClock) After(d time.Duration) <-chan time.Time {
 	return time.After(d)
 }
 
+// NewTimer returns a new Timer.
 func (RealClock) NewTimer(d time.Duration) Timer {
 	return &realTimer{
 		timer: time.NewTimer(d),
 	}
 }
 
+// NewTicker returns a new Ticker.
 func (RealClock) NewTicker(d time.Duration) Ticker {
 	return &realTicker{
 		ticker: time.NewTicker(d),
 	}
 }
 
+// Sleep pauses the RealClock for duration d.
 func (RealClock) Sleep(d time.Duration) {
 	time.Sleep(d)
 }
@@ -94,12 +97,14 @@ type fakeClockWaiter struct {
 	destChan      chan time.Time
 }
 
+// NewFakePassiveClock returns a new FakePassiveClock.
 func NewFakePassiveClock(t time.Time) *FakePassiveClock {
 	return &FakePassiveClock{
 		time: t,
 	}
 }
 
+// NewFakeClock returns a new FakeClock.
 func NewFakeClock(t time.Time) *FakeClock {
 	return &FakeClock{
 		FakePassiveClock: *NewFakePassiveClock(t),
@@ -120,14 +125,14 @@ func (f *FakePassiveClock) Since(ts time.Time) time.Duration {
 	return f.time.Sub(ts)
 }
 
-// Sets the time.
+// SetTime sets the time on the FakePassiveClock.
 func (f *FakePassiveClock) SetTime(t time.Time) {
 	f.lock.Lock()
 	defer f.lock.Unlock()
 	f.time = t
 }
 
-// Fake version of time.After(d).
+// After is the Fake version of time.After(d).
 func (f *FakeClock) After(d time.Duration) <-chan time.Time {
 	f.lock.Lock()
 	defer f.lock.Unlock()
@@ -140,7 +145,7 @@ func (f *FakeClock) After(d time.Duration) <-chan time.Time {
 	return ch
 }
 
-// Fake version of time.NewTimer(d).
+// NewTimer is the Fake version of time.NewTimer(d).
 func (f *FakeClock) NewTimer(d time.Duration) Timer {
 	f.lock.Lock()
 	defer f.lock.Unlock()
@@ -157,6 +162,7 @@ func (f *FakeClock) NewTimer(d time.Duration) Timer {
 	return timer
 }
 
+// NewTicker returns a new Ticker.
 func (f *FakeClock) NewTicker(d time.Duration) Ticker {
 	f.lock.Lock()
 	defer f.lock.Unlock()
@@ -174,14 +180,14 @@ func (f *FakeClock) NewTicker(d time.Duration) Ticker {
 	}
 }
 
-// Move clock by Duration, notify anyone that's called After, Tick, or NewTimer
+// Step moves the clock by the given duration and notifies anyone who has called After, Tick, or NewTimer.
 func (f *FakeClock) Step(d time.Duration) {
 	f.lock.Lock()
 	defer f.lock.Unlock()
 	f.setTimeLocked(f.time.Add(d))
 }
 
-// Sets the time.
+// SetTime sets the time on a FakeClock.
 func (f *FakeClock) SetTime(t time.Time) {
 	f.lock.Lock()
 	defer f.lock.Unlock()
@@ -219,7 +225,7 @@ func (f *FakeClock) setTimeLocked(t time.Time) {
 	f.waiters = newWaiters
 }
 
-// Returns true if After has been called on f but not yet satisfied (so you can
+// HasWaiters returns true if After has been called on f but not yet satisfied (so you can
 // write race-free tests).
 func (f *FakeClock) HasWaiters() bool {
 	f.lock.RLock()
@@ -227,6 +233,7 @@ func (f *FakeClock) HasWaiters() bool {
 	return len(f.waiters) > 0
 }
 
+// Sleep pauses the FakeClock for duration d.
 func (f *FakeClock) Sleep(d time.Duration) {
 	f.Step(d)
 }
@@ -248,24 +255,25 @@ func (i *IntervalClock) Since(ts time.Time) time.Duration {
 	return i.Time.Sub(ts)
 }
 
-// Unimplemented, will panic.
+// After is currently unimplemented, will panic.
 // TODO: make interval clock use FakeClock so this can be implemented.
 func (*IntervalClock) After(d time.Duration) <-chan time.Time {
 	panic("IntervalClock doesn't implement After")
 }
 
-// Unimplemented, will panic.
+// NewTimer is currently unimplemented, will panic.
 // TODO: make interval clock use FakeClock so this can be implemented.
 func (*IntervalClock) NewTimer(d time.Duration) Timer {
 	panic("IntervalClock doesn't implement NewTimer")
 }
 
-// Unimplemented, will panic.
+// NewTicker is currently unimplemented, will panic.
 // TODO: make interval clock use FakeClock so this can be implemented.
 func (*IntervalClock) NewTicker(d time.Duration) Ticker {
 	panic("IntervalClock doesn't implement NewTicker")
 }
 
+// Sleep is currently unimplemented; will panic.
 func (*IntervalClock) Sleep(d time.Duration) {
 	panic("IntervalClock doesn't implement Sleep")
 }
@@ -355,6 +363,7 @@ func (f *fakeTimer) Reset(d time.Duration) bool {
 	return false
 }
 
+// Ticker defines the Ticker interface.
 type Ticker interface {
 	C() <-chan time.Time
 	Stop()
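The clock package touched above is typically used to make time-dependent code deterministic in tests. A short sketch of driving a FakeClock by hand, using only the methods documented in this hunk:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/clock"
)

func main() {
	fc := clock.NewFakeClock(time.Now())
	ch := fc.After(5 * time.Second)

	fmt.Println(fc.HasWaiters()) // true: After registered a waiter

	// Step advances the fake time; the pending waiter fires deterministically.
	fc.Step(10 * time.Second)
	<-ch
	fmt.Println(fc.HasWaiters()) // false
}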
diff --git a/vendor/k8s.io/apimachinery/pkg/util/duration/duration.go b/vendor/k8s.io/apimachinery/pkg/util/duration/duration.go
new file mode 100644
index 00000000..961ec5ed
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/duration/duration.go
@@ -0,0 +1,89 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package duration
+
+import (
+	"fmt"
+	"time"
+)
+
+// ShortHumanDuration returns a succinct representation of the provided duration
+// with limited precision for consumption by humans.
+func ShortHumanDuration(d time.Duration) string {
+	// Allow a deviation of no more than 2 seconds (exclusive) to tolerate machine time
+	// inconsistency; such a duration can be considered as almost now.
+	if seconds := int(d.Seconds()); seconds < -1 {
+		return fmt.Sprintf("<invalid>")
+	} else if seconds < 0 {
+		return fmt.Sprintf("0s")
+	} else if seconds < 60 {
+		return fmt.Sprintf("%ds", seconds)
+	} else if minutes := int(d.Minutes()); minutes < 60 {
+		return fmt.Sprintf("%dm", minutes)
+	} else if hours := int(d.Hours()); hours < 24 {
+		return fmt.Sprintf("%dh", hours)
+	} else if hours < 24*365 {
+		return fmt.Sprintf("%dd", hours/24)
+	}
+	return fmt.Sprintf("%dy", int(d.Hours()/24/365))
+}
+
+// HumanDuration returns a succinct representation of the provided duration
+// with limited precision for consumption by humans. It provides ~2-3 significant
+// figures of duration.
+func HumanDuration(d time.Duration) string {
+	// Allow a deviation of no more than 2 seconds (exclusive) to tolerate machine time
+	// inconsistency; such a duration can be considered as almost now.
+	if seconds := int(d.Seconds()); seconds < -1 {
+		return fmt.Sprintf("<invalid>")
+	} else if seconds < 0 {
+		return fmt.Sprintf("0s")
+	} else if seconds < 60*2 {
+		return fmt.Sprintf("%ds", seconds)
+	}
+	minutes := int(d / time.Minute)
+	if minutes < 10 {
+		s := int(d/time.Second) % 60
+		if s == 0 {
+			return fmt.Sprintf("%dm", minutes)
+		}
+		return fmt.Sprintf("%dm%ds", minutes, s)
+	} else if minutes < 60*3 {
+		return fmt.Sprintf("%dm", minutes)
+	}
+	hours := int(d / time.Hour)
+	if hours < 8 {
+		m := int(d/time.Minute) % 60
+		if m == 0 {
+			return fmt.Sprintf("%dh", hours)
+		}
+		return fmt.Sprintf("%dh%dm", hours, m)
+	} else if hours < 48 {
+		return fmt.Sprintf("%dh", hours)
+	} else if hours < 24*8 {
+		h := hours % 24
+		if h == 0 {
+			return fmt.Sprintf("%dd", hours/24)
+		}
+		return fmt.Sprintf("%dd%dh", hours/24, h)
+	} else if hours < 24*365*2 {
+		return fmt.Sprintf("%dd", hours/24)
+	} else if hours < 24*365*8 {
+		return fmt.Sprintf("%dy%dd", hours/24/365, (hours/24)%365)
+	}
+	return fmt.Sprintf("%dy", int(hours/24/365))
+}
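A quick sketch showing the difference between the two helpers added above: ShortHumanDuration collapses to a single coarse unit, while HumanDuration keeps roughly two to three significant figures.

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/duration"
)

func main() {
	d := 26 * time.Hour
	fmt.Println(duration.ShortHumanDuration(d)) // "1d"  - single coarse unit
	fmt.Println(duration.HumanDuration(d))      // "26h" - keeps more precision below 48h
}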
diff --git a/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go
index 62a73f34..5bafc218 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go
@@ -28,9 +28,14 @@ type MessageCountMap map[string]int
 
 // Aggregate represents an object that contains multiple errors, but does not
 // necessarily have singular semantic meaning.
+// The aggregate can be used with `errors.Is()` to check for the occurrence of
+// a specific error type.
+// errors.As() is not supported, because the caller presumably cares about a
+// specific error out of the potentially multiple errors that match the given type.
 type Aggregate interface {
 	error
 	Errors() []error
+	Is(error) bool
 }
 
 // NewAggregate converts a slice of errors into an Aggregate interface, which
@@ -71,16 +76,17 @@ func (agg aggregate) Error() string {
 	}
 	seenerrs := sets.NewString()
 	result := ""
-	agg.visit(func(err error) {
+	agg.visit(func(err error) bool {
 		msg := err.Error()
 		if seenerrs.Has(msg) {
-			return
+			return false
 		}
 		seenerrs.Insert(msg)
 		if len(seenerrs) > 1 {
 			result += ", "
 		}
 		result += msg
+		return false
 	})
 	if len(seenerrs) == 1 {
 		return result
@@ -88,19 +94,33 @@ func (agg aggregate) Error() string {
 	return "[" + result + "]"
 }
 
-func (agg aggregate) visit(f func(err error)) {
+func (agg aggregate) Is(target error) bool {
+	return agg.visit(func(err error) bool {
+		return errors.Is(err, target)
+	})
+}
+
+func (agg aggregate) visit(f func(err error) bool) bool {
 	for _, err := range agg {
 		switch err := err.(type) {
 		case aggregate:
-			err.visit(f)
+			if match := err.visit(f); match {
+				return match
+			}
 		case Aggregate:
 			for _, nestedErr := range err.Errors() {
-				f(nestedErr)
+				if match := f(nestedErr); match {
+					return match
+				}
 			}
 		default:
-			f(err)
+			if match := f(err); match {
+				return match
+			}
 		}
 	}
+
+	return false
 }
 
 // Errors is part of the Aggregate interface.
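The new Is method above lets the standard library's errors.Is walk into an aggregate. A minimal sketch of that behaviour:

package main

import (
	"errors"
	"fmt"
	"os"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

func main() {
	agg := utilerrors.NewAggregate([]error{
		fmt.Errorf("stat failed: %w", os.ErrNotExist),
		errors.New("unrelated failure"),
	})
	// The aggregate's new Is method visits every nested error, so the standard
	// errors.Is finds os.ErrNotExist even though it is buried in the aggregate.
	fmt.Println(errors.Is(agg, os.ErrNotExist)) // true
}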
diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go
new file mode 100644
index 00000000..5893df5b
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package httpstream adds multiplexed streaming support to HTTP requests and
+// responses via connection upgrades.
+package httpstream // import "k8s.io/apimachinery/pkg/util/httpstream"
diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go
new file mode 100644
index 00000000..9d5fdeec
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go
@@ -0,0 +1,145 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package httpstream
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+	"strings"
+	"time"
+)
+
+const (
+	HeaderConnection               = "Connection"
+	HeaderUpgrade                  = "Upgrade"
+	HeaderProtocolVersion          = "X-Stream-Protocol-Version"
+	HeaderAcceptedProtocolVersions = "X-Accepted-Stream-Protocol-Versions"
+)
+
+// NewStreamHandler defines a function that is called when a new Stream is
+// received. If no error is returned, the Stream is accepted; otherwise,
+// the stream is rejected. After the reply frame has been sent, replySent is closed.
+type NewStreamHandler func(stream Stream, replySent <-chan struct{}) error
+
+// NoOpNewStreamHandler is a stream handler that accepts a new stream and
+// performs no other logic.
+func NoOpNewStreamHandler(stream Stream, replySent <-chan struct{}) error { return nil }
+
+// Dialer knows how to open a streaming connection to a server.
+type Dialer interface {
+
+	// Dial opens a streaming connection to a server using one of the protocols
+	// specified (in order of most preferred to least preferred).
+	Dial(protocols ...string) (Connection, string, error)
+}
+
+// UpgradeRoundTripper is a type of http.RoundTripper that is able to upgrade
+// HTTP requests to support multiplexed bidirectional streams. After RoundTrip()
+// is invoked, if the upgrade is successful, clients may retrieve the upgraded
+// connection by calling UpgradeRoundTripper.Connection().
+type UpgradeRoundTripper interface {
+	http.RoundTripper
+	// NewConnection validates the response and creates a new Connection.
+	NewConnection(resp *http.Response) (Connection, error)
+}
+
+// ResponseUpgrader knows how to upgrade HTTP requests and responses to
+// add streaming support to them.
+type ResponseUpgrader interface {
+	// UpgradeResponse upgrades an HTTP response to one that supports multiplexed
+	// streams. newStreamHandler will be called asynchronously whenever the
+	// other end of the upgraded connection creates a new stream.
+	UpgradeResponse(w http.ResponseWriter, req *http.Request, newStreamHandler NewStreamHandler) Connection
+}
+
+// Connection represents an upgraded HTTP connection.
+type Connection interface {
+	// CreateStream creates a new Stream with the supplied headers.
+	CreateStream(headers http.Header) (Stream, error)
+	// Close resets all streams and closes the connection.
+	Close() error
+	// CloseChan returns a channel that is closed when the underlying connection is closed.
+	CloseChan() <-chan bool
+	// SetIdleTimeout sets the amount of time the connection may remain idle before
+	// it is automatically closed.
+	SetIdleTimeout(timeout time.Duration)
+}
+
+// Stream represents a bidirectional communications channel that is part of an
+// upgraded connection.
+type Stream interface {
+	io.ReadWriteCloser
+	// Reset closes both directions of the stream, indicating that neither client
+	// nor server can use it any more.
+	Reset() error
+	// Headers returns the headers used to create the stream.
+	Headers() http.Header
+	// Identifier returns the stream's ID.
+	Identifier() uint32
+}
+
+// IsUpgradeRequest returns true if the given request is a connection upgrade request
+func IsUpgradeRequest(req *http.Request) bool {
+	for _, h := range req.Header[http.CanonicalHeaderKey(HeaderConnection)] {
+		if strings.Contains(strings.ToLower(h), strings.ToLower(HeaderUpgrade)) {
+			return true
+		}
+	}
+	return false
+}
+
+func negotiateProtocol(clientProtocols, serverProtocols []string) string {
+	for i := range clientProtocols {
+		for j := range serverProtocols {
+			if clientProtocols[i] == serverProtocols[j] {
+				return clientProtocols[i]
+			}
+		}
+	}
+	return ""
+}
+
+// Handshake performs a subprotocol negotiation. If the client did request a
+// subprotocol, Handshake will select the first common value found in
+// serverProtocols. If a match is found, Handshake adds a response header
+// indicating the chosen subprotocol. If no match is found, HTTP forbidden is
+// returned, along with a response header containing the list of protocols the
+// server can accept.
+func Handshake(req *http.Request, w http.ResponseWriter, serverProtocols []string) (string, error) {
+	clientProtocols := req.Header[http.CanonicalHeaderKey(HeaderProtocolVersion)]
+	if len(clientProtocols) == 0 {
+		return "", fmt.Errorf("unable to upgrade: %s is required", HeaderProtocolVersion)
+	}
+
+	if len(serverProtocols) == 0 {
+		panic(fmt.Errorf("unable to upgrade: serverProtocols is required"))
+	}
+
+	negotiatedProtocol := negotiateProtocol(clientProtocols, serverProtocols)
+	if len(negotiatedProtocol) == 0 {
+		for i := range serverProtocols {
+			w.Header().Add(HeaderAcceptedProtocolVersions, serverProtocols[i])
+		}
+		err := fmt.Errorf("unable to upgrade: unable to negotiate protocol: client supports %v, server accepts %v", clientProtocols, serverProtocols)
+		http.Error(w, err.Error(), http.StatusForbidden)
+		return "", err
+	}
+
+	w.Header().Add(HeaderProtocolVersion, negotiatedProtocol)
+	return negotiatedProtocol, nil
+}
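A sketch of calling Handshake from an ordinary handler before upgrading the connection; the protocol names, path, and port are illustrative only. Note that Handshake writes the 403 response itself when no common protocol is found, but only returns an error when the client omitted the version header.

package main

import (
	"fmt"
	"net/http"

	"k8s.io/apimachinery/pkg/util/httpstream"
)

func main() {
	// Example protocol names; the real values depend on the API being served.
	serverProtocols := []string{"v4.channel.k8s.io", "v3.channel.k8s.io"}

	http.HandleFunc("/stream", func(w http.ResponseWriter, req *http.Request) {
		protocol, err := httpstream.Handshake(req, w, serverProtocols)
		if err != nil {
			// When no common protocol exists Handshake has already written the 403;
			// a missing X-Stream-Protocol-Version header only yields this error.
			return
		}
		fmt.Println("negotiated subprotocol:", protocol)
		// ... continue with spdy.NewResponseUpgrader().UpgradeResponse(w, req, handler)
	})
	_ = http.ListenAndServe("127.0.0.1:8080", nil)
}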
diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go
new file mode 100644
index 00000000..9d222faa
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go
@@ -0,0 +1,145 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package spdy
+
+import (
+	"net"
+	"net/http"
+	"sync"
+	"time"
+
+	"github.com/docker/spdystream"
+	"k8s.io/apimachinery/pkg/util/httpstream"
+	"k8s.io/klog"
+)
+
+// connection maintains state about a spdystream.Connection and its associated
+// streams.
+type connection struct {
+	conn             *spdystream.Connection
+	streams          []httpstream.Stream
+	streamLock       sync.Mutex
+	newStreamHandler httpstream.NewStreamHandler
+}
+
+// NewClientConnection creates a new SPDY client connection.
+func NewClientConnection(conn net.Conn) (httpstream.Connection, error) {
+	spdyConn, err := spdystream.NewConnection(conn, false)
+	if err != nil {
+		defer conn.Close()
+		return nil, err
+	}
+
+	return newConnection(spdyConn, httpstream.NoOpNewStreamHandler), nil
+}
+
+// NewServerConnection creates a new SPDY server connection. newStreamHandler
+// will be invoked when the server receives a newly created stream from the
+// client.
+func NewServerConnection(conn net.Conn, newStreamHandler httpstream.NewStreamHandler) (httpstream.Connection, error) {
+	spdyConn, err := spdystream.NewConnection(conn, true)
+	if err != nil {
+		defer conn.Close()
+		return nil, err
+	}
+
+	return newConnection(spdyConn, newStreamHandler), nil
+}
+
+// newConnection returns a new connection wrapping conn. newStreamHandler
+// will be invoked when the server receives a newly created stream from the
+// client.
+func newConnection(conn *spdystream.Connection, newStreamHandler httpstream.NewStreamHandler) httpstream.Connection {
+	c := &connection{conn: conn, newStreamHandler: newStreamHandler}
+	go conn.Serve(c.newSpdyStream)
+	return c
+}
+
+// createStreamResponseTimeout indicates how long to wait for the other side to
+// acknowledge the new stream before timing out.
+const createStreamResponseTimeout = 30 * time.Second
+
+// Close first sends a reset for all of the connection's streams, and then
+// closes the underlying spdystream.Connection.
+func (c *connection) Close() error {
+	c.streamLock.Lock()
+	for _, s := range c.streams {
+		// calling Reset instead of Close ensures that all streams are fully torn down
+		s.Reset()
+	}
+	c.streams = make([]httpstream.Stream, 0)
+	c.streamLock.Unlock()
+
+	// now that all streams are fully torn down, it's safe to call close on the underlying connection,
+	// which should be able to terminate immediately at this point, instead of waiting for any
+	// remaining graceful stream termination.
+	return c.conn.Close()
+}
+
+// CreateStream creates a new stream with the specified headers and registers
+// it with the connection.
+func (c *connection) CreateStream(headers http.Header) (httpstream.Stream, error) {
+	stream, err := c.conn.CreateStream(headers, nil, false)
+	if err != nil {
+		return nil, err
+	}
+	if err = stream.WaitTimeout(createStreamResponseTimeout); err != nil {
+		return nil, err
+	}
+
+	c.registerStream(stream)
+	return stream, nil
+}
+
+// registerStream adds the stream s to the connection's list of streams that
+// it owns.
+func (c *connection) registerStream(s httpstream.Stream) {
+	c.streamLock.Lock()
+	c.streams = append(c.streams, s)
+	c.streamLock.Unlock()
+}
+
+// CloseChan returns a channel that, when closed, indicates that the underlying
+// spdystream.Connection has been closed.
+func (c *connection) CloseChan() <-chan bool {
+	return c.conn.CloseChan()
+}
+
+// newSpdyStream is the internal new stream handler used by spdystream.Connection.Serve.
+// It calls connection's newStreamHandler, giving it the opportunity to accept or reject
+// the stream. If newStreamHandler returns an error, the stream is rejected. If not, the
+// stream is accepted and registered with the connection.
+func (c *connection) newSpdyStream(stream *spdystream.Stream) {
+	replySent := make(chan struct{})
+	err := c.newStreamHandler(stream, replySent)
+	rejectStream := (err != nil)
+	if rejectStream {
+		klog.Warningf("Stream rejected: %v", err)
+		stream.Reset()
+		return
+	}
+
+	c.registerStream(stream)
+	stream.SendReply(http.Header{}, rejectStream)
+	close(replySent)
+}
+
+// SetIdleTimeout sets the amount of time the connection may remain idle before
+// it is automatically closed.
+func (c *connection) SetIdleTimeout(timeout time.Duration) {
+	c.conn.SetIdleTimeout(timeout)
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go
new file mode 100644
index 00000000..2699597e
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go
@@ -0,0 +1,335 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package spdy
+
+import (
+	"bufio"
+	"bytes"
+	"context"
+	"crypto/tls"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/http/httputil"
+	"net/url"
+	"strings"
+
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/apimachinery/pkg/util/httpstream"
+	utilnet "k8s.io/apimachinery/pkg/util/net"
+	"k8s.io/apimachinery/third_party/forked/golang/netutil"
+)
+
+// SpdyRoundTripper knows how to upgrade an HTTP request to one that supports
+// multiplexed streams. After RoundTrip() is invoked, conn will be set
+// and usable. SpdyRoundTripper implements the UpgradeRoundTripper interface.
+type SpdyRoundTripper struct {
+	//tlsConfig holds the TLS configuration settings to use when connecting
+	//to the remote server.
+	tlsConfig *tls.Config
+
+	/* TODO according to http://golang.org/pkg/net/http/#RoundTripper, a RoundTripper
+	   must be safe for use by multiple concurrent goroutines. If this is absolutely
+	   necessary, we could keep a map from http.Request to net.Conn. In practice,
+	   a client will create an http.Client, set the transport to a new instance of
+	   SpdyRoundTripper, and use it a single time, so this hopefully won't be an issue.
+	*/
+	// conn is the underlying network connection to the remote server.
+	conn net.Conn
+
+	// Dialer is the dialer used to connect.  Used if non-nil.
+	Dialer *net.Dialer
+
+	// proxier knows which proxy to use given a request, defaults to http.ProxyFromEnvironment
+	// Used primarily for mocking the proxy discovery in tests.
+	proxier func(req *http.Request) (*url.URL, error)
+
+	// followRedirects indicates if the round tripper should examine responses for redirects and
+	// follow them.
+	followRedirects bool
+	// requireSameHostRedirects restricts redirect following to only follow redirects to the same host
+	// as the original request.
+	requireSameHostRedirects bool
+}
+
+var _ utilnet.TLSClientConfigHolder = &SpdyRoundTripper{}
+var _ httpstream.UpgradeRoundTripper = &SpdyRoundTripper{}
+var _ utilnet.Dialer = &SpdyRoundTripper{}
+
+// NewRoundTripper creates a new SpdyRoundTripper that will use
+// the specified tlsConfig.
+func NewRoundTripper(tlsConfig *tls.Config, followRedirects, requireSameHostRedirects bool) httpstream.UpgradeRoundTripper {
+	return NewSpdyRoundTripper(tlsConfig, followRedirects, requireSameHostRedirects)
+}
+
+// NewSpdyRoundTripper creates a new SpdyRoundTripper that will use
+// the specified tlsConfig. This function is mostly meant for unit tests.
+func NewSpdyRoundTripper(tlsConfig *tls.Config, followRedirects, requireSameHostRedirects bool) *SpdyRoundTripper {
+	return &SpdyRoundTripper{
+		tlsConfig:                tlsConfig,
+		followRedirects:          followRedirects,
+		requireSameHostRedirects: requireSameHostRedirects,
+	}
+}
+
+// TLSClientConfig implements pkg/util/net.TLSClientConfigHolder for proper TLS checking during
+// proxying with a spdy roundtripper.
+func (s *SpdyRoundTripper) TLSClientConfig() *tls.Config {
+	return s.tlsConfig
+}
+
+// Dial implements k8s.io/apimachinery/pkg/util/net.Dialer.
+func (s *SpdyRoundTripper) Dial(req *http.Request) (net.Conn, error) {
+	conn, err := s.dial(req)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := req.Write(conn); err != nil {
+		conn.Close()
+		return nil, err
+	}
+
+	return conn, nil
+}
+
+// dial dials the host specified by req, using TLS if appropriate, optionally
+// using a proxy server if one is configured via environment variables.
+func (s *SpdyRoundTripper) dial(req *http.Request) (net.Conn, error) {
+	proxier := s.proxier
+	if proxier == nil {
+		proxier = utilnet.NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment)
+	}
+	proxyURL, err := proxier(req)
+	if err != nil {
+		return nil, err
+	}
+
+	if proxyURL == nil {
+		return s.dialWithoutProxy(req.Context(), req.URL)
+	}
+
+	// ensure we use a canonical host with proxyReq
+	targetHost := netutil.CanonicalAddr(req.URL)
+
+	// proxying logic adapted from http://blog.h6t.eu/post/74098062923/golang-websocket-with-http-proxy-support
+	proxyReq := http.Request{
+		Method: "CONNECT",
+		URL:    &url.URL{},
+		Host:   targetHost,
+	}
+
+	if pa := s.proxyAuth(proxyURL); pa != "" {
+		proxyReq.Header = http.Header{}
+		proxyReq.Header.Set("Proxy-Authorization", pa)
+	}
+
+	proxyDialConn, err := s.dialWithoutProxy(req.Context(), proxyURL)
+	if err != nil {
+		return nil, err
+	}
+
+	proxyClientConn := httputil.NewProxyClientConn(proxyDialConn, nil)
+	_, err = proxyClientConn.Do(&proxyReq)
+	if err != nil && err != httputil.ErrPersistEOF {
+		return nil, err
+	}
+
+	rwc, _ := proxyClientConn.Hijack()
+
+	if req.URL.Scheme != "https" {
+		return rwc, nil
+	}
+
+	host, _, err := net.SplitHostPort(targetHost)
+	if err != nil {
+		return nil, err
+	}
+
+	tlsConfig := s.tlsConfig
+	switch {
+	case tlsConfig == nil:
+		tlsConfig = &tls.Config{ServerName: host}
+	case len(tlsConfig.ServerName) == 0:
+		tlsConfig = tlsConfig.Clone()
+		tlsConfig.ServerName = host
+	}
+
+	tlsConn := tls.Client(rwc, tlsConfig)
+
+	// need to manually call Handshake() so we can call VerifyHostname() below
+	if err := tlsConn.Handshake(); err != nil {
+		return nil, err
+	}
+
+	// Return if we were configured to skip validation
+	if tlsConfig.InsecureSkipVerify {
+		return tlsConn, nil
+	}
+
+	if err := tlsConn.VerifyHostname(tlsConfig.ServerName); err != nil {
+		return nil, err
+	}
+
+	return tlsConn, nil
+}
+
+// dialWithoutProxy dials the host specified by url, using TLS if appropriate.
+func (s *SpdyRoundTripper) dialWithoutProxy(ctx context.Context, url *url.URL) (net.Conn, error) {
+	dialAddr := netutil.CanonicalAddr(url)
+
+	if url.Scheme == "http" {
+		if s.Dialer == nil {
+			var d net.Dialer
+			return d.DialContext(ctx, "tcp", dialAddr)
+		} else {
+			return s.Dialer.DialContext(ctx, "tcp", dialAddr)
+		}
+	}
+
+	// TODO validate the TLSClientConfig is set up?
+	var conn *tls.Conn
+	var err error
+	if s.Dialer == nil {
+		conn, err = tls.Dial("tcp", dialAddr, s.tlsConfig)
+	} else {
+		conn, err = tls.DialWithDialer(s.Dialer, "tcp", dialAddr, s.tlsConfig)
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	// Return if we were configured to skip validation
+	if s.tlsConfig != nil && s.tlsConfig.InsecureSkipVerify {
+		return conn, nil
+	}
+
+	host, _, err := net.SplitHostPort(dialAddr)
+	if err != nil {
+		return nil, err
+	}
+	if s.tlsConfig != nil && len(s.tlsConfig.ServerName) > 0 {
+		host = s.tlsConfig.ServerName
+	}
+	err = conn.VerifyHostname(host)
+	if err != nil {
+		return nil, err
+	}
+
+	return conn, nil
+}
+
+// proxyAuth returns, for a given proxy URL, the value to be used for the Proxy-Authorization header
+func (s *SpdyRoundTripper) proxyAuth(proxyURL *url.URL) string {
+	if proxyURL == nil || proxyURL.User == nil {
+		return ""
+	}
+	credentials := proxyURL.User.String()
+	encodedAuth := base64.StdEncoding.EncodeToString([]byte(credentials))
+	return fmt.Sprintf("Basic %s", encodedAuth)
+}
+
+// RoundTrip executes the Request and upgrades it. After a successful upgrade,
+// clients may call SpdyRoundTripper.Connection() to retrieve the upgraded
+// connection.
+func (s *SpdyRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+	header := utilnet.CloneHeader(req.Header)
+	header.Add(httpstream.HeaderConnection, httpstream.HeaderUpgrade)
+	header.Add(httpstream.HeaderUpgrade, HeaderSpdy31)
+
+	var (
+		conn        net.Conn
+		rawResponse []byte
+		err         error
+	)
+
+	if s.followRedirects {
+		conn, rawResponse, err = utilnet.ConnectWithRedirects(req.Method, req.URL, header, req.Body, s, s.requireSameHostRedirects)
+	} else {
+		clone := utilnet.CloneRequest(req)
+		clone.Header = header
+		conn, err = s.Dial(clone)
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	responseReader := bufio.NewReader(
+		io.MultiReader(
+			bytes.NewBuffer(rawResponse),
+			conn,
+		),
+	)
+
+	resp, err := http.ReadResponse(responseReader, nil)
+	if err != nil {
+		if conn != nil {
+			conn.Close()
+		}
+		return nil, err
+	}
+
+	s.conn = conn
+
+	return resp, nil
+}
+
+// NewConnection validates the upgrade response, creating and returning a new
+// httpstream.Connection if there were no errors.
+func (s *SpdyRoundTripper) NewConnection(resp *http.Response) (httpstream.Connection, error) {
+	connectionHeader := strings.ToLower(resp.Header.Get(httpstream.HeaderConnection))
+	upgradeHeader := strings.ToLower(resp.Header.Get(httpstream.HeaderUpgrade))
+	if (resp.StatusCode != http.StatusSwitchingProtocols) || !strings.Contains(connectionHeader, strings.ToLower(httpstream.HeaderUpgrade)) || !strings.Contains(upgradeHeader, strings.ToLower(HeaderSpdy31)) {
+		defer resp.Body.Close()
+		responseError := ""
+		responseErrorBytes, err := ioutil.ReadAll(resp.Body)
+		if err != nil {
+			responseError = "unable to read error from server response"
+		} else {
+			// TODO: I don't belong here, I should be abstracted from this class
+			if obj, _, err := statusCodecs.UniversalDecoder().Decode(responseErrorBytes, nil, &metav1.Status{}); err == nil {
+				if status, ok := obj.(*metav1.Status); ok {
+					return nil, &apierrors.StatusError{ErrStatus: *status}
+				}
+			}
+			responseError = string(responseErrorBytes)
+			responseError = strings.TrimSpace(responseError)
+		}
+
+		return nil, fmt.Errorf("unable to upgrade connection: %s", responseError)
+	}
+
+	return NewClientConnection(s.conn)
+}
+
+// statusScheme is private scheme for the decoding here until someone fixes the TODO in NewConnection
+var statusScheme = runtime.NewScheme()
+
+// ParameterCodec knows about query parameters used with the meta v1 API spec.
+var statusCodecs = serializer.NewCodecFactory(statusScheme)
+
+func init() {
+	statusScheme.AddUnversionedTypes(metav1.SchemeGroupVersion,
+		&metav1.Status{},
+	)
+}
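A hedged sketch of the client side of the round tripper added above: RoundTrip performs the upgrade handshake and NewConnection turns the hijacked response into an httpstream.Connection. The URL and the InsecureSkipVerify setting are placeholders; real callers usually reach this code via client-go's remotecommand machinery.

package main

import (
	"crypto/tls"
	"net/http"

	"k8s.io/apimachinery/pkg/util/httpstream"
	"k8s.io/apimachinery/pkg/util/httpstream/spdy"
)

func dialSPDY(target string) (httpstream.Connection, error) {
	// InsecureSkipVerify only keeps the sketch short; do not do this in production.
	rt := spdy.NewRoundTripper(&tls.Config{InsecureSkipVerify: true}, true, false)

	req, err := http.NewRequest(http.MethodPost, target, nil)
	if err != nil {
		return nil, err
	}
	// RoundTrip dials (optionally through an HTTP proxy), sends the Upgrade request,
	// and keeps the hijacked net.Conn inside the round tripper.
	resp, err := rt.RoundTrip(req)
	if err != nil {
		return nil, err
	}
	// NewConnection validates the 101 response and wraps the connection in SPDY framing.
	return rt.NewConnection(resp)
}

func main() {
	conn, err := dialSPDY("https://127.0.0.1:6443/api/v1/namespaces/default/pods/demo/exec")
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}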
diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go
new file mode 100644
index 00000000..045d214d
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go
@@ -0,0 +1,107 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package spdy
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"strings"
+	"sync/atomic"
+
+	"k8s.io/apimachinery/pkg/util/httpstream"
+	"k8s.io/apimachinery/pkg/util/runtime"
+)
+
+const HeaderSpdy31 = "SPDY/3.1"
+
+// responseUpgrader knows how to upgrade HTTP responses. It
+// implements the httpstream.ResponseUpgrader interface.
+type responseUpgrader struct {
+}
+
+// connWrapper is used to wrap a hijacked connection and its bufio.Reader. All
+// calls will be handled directly by the underlying net.Conn with the exception
+// of Read and Close calls, which will consider data in the bufio.Reader. This
+// ensures that data already inside the used bufio.Reader instance is also
+// read.
+type connWrapper struct {
+	net.Conn
+	closed    int32
+	bufReader *bufio.Reader
+}
+
+func (w *connWrapper) Read(b []byte) (n int, err error) {
+	if atomic.LoadInt32(&w.closed) == 1 {
+		return 0, io.EOF
+	}
+	return w.bufReader.Read(b)
+}
+
+func (w *connWrapper) Close() error {
+	err := w.Conn.Close()
+	atomic.StoreInt32(&w.closed, 1)
+	return err
+}
+
+// NewResponseUpgrader returns a new httpstream.ResponseUpgrader that is
+// capable of upgrading HTTP responses using SPDY/3.1 via the
+// spdystream package.
+func NewResponseUpgrader() httpstream.ResponseUpgrader {
+	return responseUpgrader{}
+}
+
+// UpgradeResponse upgrades an HTTP response to one that supports multiplexed
+// streams. newStreamHandler will be called synchronously whenever the
+// other end of the upgraded connection creates a new stream.
+func (u responseUpgrader) UpgradeResponse(w http.ResponseWriter, req *http.Request, newStreamHandler httpstream.NewStreamHandler) httpstream.Connection {
+	connectionHeader := strings.ToLower(req.Header.Get(httpstream.HeaderConnection))
+	upgradeHeader := strings.ToLower(req.Header.Get(httpstream.HeaderUpgrade))
+	if !strings.Contains(connectionHeader, strings.ToLower(httpstream.HeaderUpgrade)) || !strings.Contains(upgradeHeader, strings.ToLower(HeaderSpdy31)) {
+		errorMsg := fmt.Sprintf("unable to upgrade: missing upgrade headers in request: %#v", req.Header)
+		http.Error(w, errorMsg, http.StatusBadRequest)
+		return nil
+	}
+
+	hijacker, ok := w.(http.Hijacker)
+	if !ok {
+		errorMsg := fmt.Sprintf("unable to upgrade: unable to hijack response")
+		http.Error(w, errorMsg, http.StatusInternalServerError)
+		return nil
+	}
+
+	w.Header().Add(httpstream.HeaderConnection, httpstream.HeaderUpgrade)
+	w.Header().Add(httpstream.HeaderUpgrade, HeaderSpdy31)
+	w.WriteHeader(http.StatusSwitchingProtocols)
+
+	conn, bufrw, err := hijacker.Hijack()
+	if err != nil {
+		runtime.HandleError(fmt.Errorf("unable to upgrade: error hijacking response: %v", err))
+		return nil
+	}
+
+	connWithBuf := &connWrapper{Conn: conn, bufReader: bufrw.Reader}
+	spdyConn, err := NewServerConnection(connWithBuf, newStreamHandler)
+	if err != nil {
+		runtime.HandleError(fmt.Errorf("unable to upgrade: error creating SPDY server connection: %v", err))
+		return nil
+	}
+
+	return spdyConn
+}
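And the matching server side: a sketch of a handler that upgrades the response with the responseUpgrader above and echoes every accepted stream back to the client. The path and port are illustrative.

package main

import (
	"io"
	"net/http"

	"k8s.io/apimachinery/pkg/util/httpstream"
	"k8s.io/apimachinery/pkg/util/httpstream/spdy"
)

func main() {
	upgrader := spdy.NewResponseUpgrader()
	http.HandleFunc("/stream", func(w http.ResponseWriter, req *http.Request) {
		conn := upgrader.UpgradeResponse(w, req, func(stream httpstream.Stream, replySent <-chan struct{}) error {
			// Accept every stream and echo it back once the reply frame has been sent.
			go func() {
				defer stream.Close()
				<-replySent
				io.Copy(stream, stream)
			}()
			return nil
		})
		if conn == nil {
			return // UpgradeResponse already wrote an HTTP error
		}
		defer conn.Close()
		<-conn.CloseChan() // block until the client goes away
	})
	_ = http.ListenAndServe("127.0.0.1:8080", nil)
}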
diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go
index 64cbc770..ec1cb70f 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go
@@ -38,7 +38,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 func (m *IntOrString) Reset()      { *m = IntOrString{} }
 func (*IntOrString) ProtoMessage() {}
@@ -289,6 +289,7 @@ func (m *IntOrString) Unmarshal(dAtA []byte) error {
 func skipGenerated(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
+	depth := 0
 	for iNdEx < l {
 		var wire uint64
 		for shift := uint(0); ; shift += 7 {
@@ -320,10 +321,8 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			return iNdEx, nil
 		case 1:
 			iNdEx += 8
-			return iNdEx, nil
 		case 2:
 			var length int
 			for shift := uint(0); ; shift += 7 {
@@ -344,55 +343,30 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 				return 0, ErrInvalidLengthGenerated
 			}
 			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthGenerated
-			}
-			return iNdEx, nil
 		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowGenerated
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipGenerated(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthGenerated
-				}
-			}
-			return iNdEx, nil
+			depth++
 		case 4:
-			return iNdEx, nil
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
 		case 5:
 			iNdEx += 4
-			return iNdEx, nil
 		default:
 			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
 		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
 	}
-	panic("unreachable")
+	return 0, io.ErrUnexpectedEOF
 }
 
 var (
-	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
 )
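The rewritten skipGenerated tracks group nesting with a depth counter instead of returning after the first field, so nested groups (wire types 3 and 4) are skipped correctly and truncated input now yields io.ErrUnexpectedEOF instead of a panic. As a small illustration of the tag decoding the switch above relies on (a generic protobuf fact, not code from this patch), assuming a tag that fits in a single byte:

package main

import "fmt"

func main() {
	// Decode a protobuf tag: the low three bits are the wire type,
	// the remaining bits are the field number.
	tag := byte(0x1a)
	fmt.Printf("field=%d wireType=%d\n", tag>>3, tag&0x7) // field=3 wireType=2 (length-delimited)
}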
diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go
index 2df62955..cb974dcf 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go
@@ -97,7 +97,8 @@ func (intstr *IntOrString) String() string {
 }
 
 // IntValue returns the IntVal if type Int, or if
-// it is a String, will attempt a conversion to int.
+// it is a String, will attempt a conversion to int,
+// returning 0 if a parsing error occurs.
 func (intstr *IntOrString) IntValue() int {
 	if intstr.Type == String {
 		i, _ := strconv.Atoi(intstr.StrVal)
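A minimal sketch of the documented fallback (values chosen for illustration only):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	port := intstr.FromInt(8080)
	named := intstr.FromString("http") // not numeric, so the conversion fails

	fmt.Println(port.IntValue())  // 8080
	fmt.Println(named.IntValue()) // 0, the parse error is swallowed as documented above
}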
diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/http.go b/vendor/k8s.io/apimachinery/pkg/util/net/http.go
index f9540c63..0ba586bf 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/net/http.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/net/http.go
@@ -206,13 +206,17 @@ func GetHTTPClient(req *http.Request) string {
 	return "unknown"
 }
 
-// SourceIPs splits the comma separated X-Forwarded-For header or returns the X-Real-Ip header or req.RemoteAddr,
-// in that order, ignoring invalid IPs. It returns nil if all of these are empty or invalid.
+// SourceIPs splits the comma separated X-Forwarded-For header and joins it with
+// the X-Real-Ip header and/or req.RemoteAddr, ignoring invalid IPs.
+// The X-Real-Ip is omitted if it's already present in the X-Forwarded-For chain.
+// The req.RemoteAddr is always the last IP in the returned list.
+// It returns nil if all of these are empty or invalid.
 func SourceIPs(req *http.Request) []net.IP {
+	var srcIPs []net.IP
+
 	hdr := req.Header
 	// First check the X-Forwarded-For header for requests via proxy.
 	hdrForwardedFor := hdr.Get("X-Forwarded-For")
-	forwardedForIPs := []net.IP{}
 	if hdrForwardedFor != "" {
 		// X-Forwarded-For can be a csv of IPs in case of multiple proxies.
 		// Use the first valid one.
@@ -220,38 +224,49 @@ func SourceIPs(req *http.Request) []net.IP {
 		for _, part := range parts {
 			ip := net.ParseIP(strings.TrimSpace(part))
 			if ip != nil {
-				forwardedForIPs = append(forwardedForIPs, ip)
+				srcIPs = append(srcIPs, ip)
 			}
 		}
 	}
-	if len(forwardedForIPs) > 0 {
-		return forwardedForIPs
-	}
 
 	// Try the X-Real-Ip header.
 	hdrRealIp := hdr.Get("X-Real-Ip")
 	if hdrRealIp != "" {
 		ip := net.ParseIP(hdrRealIp)
-		if ip != nil {
-			return []net.IP{ip}
+		// Only append the X-Real-Ip if it's not already contained in the X-Forwarded-For chain.
+		if ip != nil && !containsIP(srcIPs, ip) {
+			srcIPs = append(srcIPs, ip)
 		}
 	}
 
-	// Fallback to Remote Address in request, which will give the correct client IP when there is no proxy.
+	// Always include the request Remote Address as it cannot be easily spoofed.
+	var remoteIP net.IP
 	// Remote Address in Go's HTTP server is in the form host:port so we need to split that first.
 	host, _, err := net.SplitHostPort(req.RemoteAddr)
 	if err == nil {
-		if remoteIP := net.ParseIP(host); remoteIP != nil {
-			return []net.IP{remoteIP}
+		remoteIP = net.ParseIP(host)
+	}
+	// Fallback if Remote Address was just IP.
+	if remoteIP == nil {
+		remoteIP = net.ParseIP(req.RemoteAddr)
+	}
+
+	// Don't duplicate remote IP if it's already the last address in the chain.
+	if remoteIP != nil && (len(srcIPs) == 0 || !remoteIP.Equal(srcIPs[len(srcIPs)-1])) {
+		srcIPs = append(srcIPs, remoteIP)
+	}
+
+	return srcIPs
+}
+
+// containsIP checks whether the given IP address is contained in the list of IPs.
+func containsIP(ips []net.IP, ip net.IP) bool {
+	for _, v := range ips {
+		if v.Equal(ip) {
+			return true
 		}
 	}
-
-	// Fallback if Remote Address was just IP.
-	if remoteIP := net.ParseIP(req.RemoteAddr); remoteIP != nil {
-		return []net.IP{remoteIP}
-	}
-
-	return nil
+	return false
 }
 
 // Extracts and returns the clients IP from the given request.
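A usage sketch of the new SourceIPs behavior, with made-up addresses: the X-Forwarded-For chain is kept in order, the duplicate X-Real-Ip is dropped, and the remote address is always appended last.

package main

import (
	"fmt"
	"net/http"

	utilnet "k8s.io/apimachinery/pkg/util/net"
)

func main() {
	req, _ := http.NewRequest("GET", "http://example.invalid/", nil)
	req.Header.Set("X-Forwarded-For", "10.0.0.1, 10.0.0.2")
	req.Header.Set("X-Real-Ip", "10.0.0.2") // already in the chain, so not duplicated
	req.RemoteAddr = "192.0.2.7:53124"      // always appended last

	fmt.Println(utilnet.SourceIPs(req)) // [10.0.0.1 10.0.0.2 192.0.2.7]
}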
diff --git a/vendor/k8s.io/apimachinery/pkg/util/remotecommand/constants.go b/vendor/k8s.io/apimachinery/pkg/util/remotecommand/constants.go
new file mode 100644
index 00000000..acfeb827
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/remotecommand/constants.go
@@ -0,0 +1,53 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package remotecommand
+
+import (
+	"time"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+const (
+	DefaultStreamCreationTimeout = 30 * time.Second
+
+	// The SPDY subprotocol "channel.k8s.io" is used for remote command
+	// attachment/execution. This represents the initial unversioned subprotocol,
+	// which has the known bugs http://issues.k8s.io/13394 and
+	// http://issues.k8s.io/13395.
+	StreamProtocolV1Name = "channel.k8s.io"
+
+	// The SPDY subprotocol "v2.channel.k8s.io" is used for remote command
+	// attachment/execution. It is the second version of the subprotocol and
+	// resolves the issues present in the first version.
+	StreamProtocolV2Name = "v2.channel.k8s.io"
+
+	// The SPDY subprotocol "v3.channel.k8s.io" is used for remote command
+	// attachment/execution. It is the third version of the subprotocol and
+	// adds support for resizing container terminals.
+	StreamProtocolV3Name = "v3.channel.k8s.io"
+
+	// The SPDY subprotocol "v4.channel.k8s.io" is used for remote command
+// attachment/execution. It is the fourth version of the subprotocol and
+	// adds support for exit codes.
+	StreamProtocolV4Name = "v4.channel.k8s.io"
+
+	NonZeroExitCodeReason = metav1.StatusReason("NonZeroExitCode")
+	ExitCodeCauseType     = metav1.CauseType("ExitCode")
+)
+
+var SupportedStreamingProtocols = []string{StreamProtocolV4Name, StreamProtocolV3Name, StreamProtocolV2Name, StreamProtocolV1Name}
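These constants are ordered newest-first in SupportedStreamingProtocols, which is the order a server-side negotiation would typically iterate over. A hypothetical helper sketch (pickProtocol is not part of this patch):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/remotecommand"
)

// pickProtocol is a hypothetical helper: return the first subprotocol the
// server supports (newest first) that the client also offered, or "".
func pickProtocol(clientOffers []string) string {
	offered := map[string]bool{}
	for _, p := range clientOffers {
		offered[p] = true
	}
	for _, p := range remotecommand.SupportedStreamingProtocols {
		if offered[p] {
			return p
		}
	}
	return ""
}

func main() {
	got := pickProtocol([]string{
		remotecommand.StreamProtocolV2Name,
		remotecommand.StreamProtocolV4Name,
	})
	fmt.Println(got) // v4.channel.k8s.io
}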
diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go
index 8e1907c2..915231f2 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go
@@ -109,6 +109,44 @@ func IsFullyQualifiedDomainName(fldPath *field.Path, name string) field.ErrorLis
 	return allErrors
 }
 
+// Allowed characters in an HTTP Path as defined by RFC 3986. An HTTP path may
+// contain:
+// * unreserved characters (alphanumeric, '-', '.', '_', '~')
+// * percent-encoded octets
+// * sub-delims ("!", "$", "&", "'", "(", ")", "*", "+", ",", ";", "=")
+// * a colon character (":")
+const httpPathFmt string = `[A-Za-z0-9/\-._~%!$&'()*+,;=:]+`
+
+var httpPathRegexp = regexp.MustCompile("^" + httpPathFmt + "$")
+
+// IsDomainPrefixedPath checks if the given string is a domain-prefixed path
+// (e.g. acme.io/foo). All characters before the first "/" must be a valid
+// subdomain as defined by RFC 1123. All characters trailing the first "/" must
+// be valid HTTP Path characters as defined by RFC 3986.
+func IsDomainPrefixedPath(fldPath *field.Path, dpPath string) field.ErrorList {
+	var allErrs field.ErrorList
+	if len(dpPath) == 0 {
+		return append(allErrs, field.Required(fldPath, ""))
+	}
+
+	segments := strings.SplitN(dpPath, "/", 2)
+	if len(segments) != 2 || len(segments[0]) == 0 || len(segments[1]) == 0 {
+		return append(allErrs, field.Invalid(fldPath, dpPath, "must be a domain-prefixed path (such as \"acme.io/foo\")"))
+	}
+
+	host := segments[0]
+	for _, err := range IsDNS1123Subdomain(host) {
+		allErrs = append(allErrs, field.Invalid(fldPath, host, err))
+	}
+
+	path := segments[1]
+	if !httpPathRegexp.MatchString(path) {
+		return append(allErrs, field.Invalid(fldPath, path, RegexError("Invalid path", httpPathFmt)))
+	}
+
+	return allErrs
+}
+
 const labelValueFmt string = "(" + qualifiedNameFmt + ")?"
 const labelValueErrMsg string = "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character"
 
diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go
index 386c3e7e..4cb0c122 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go
@@ -19,10 +19,12 @@ package wait
 import (
 	"context"
 	"errors"
+	"math"
 	"math/rand"
 	"sync"
 	"time"
 
+	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/util/runtime"
 )
 
@@ -128,9 +130,15 @@ func NonSlidingUntilWithContext(ctx context.Context, f func(context.Context), pe
 // Close stopCh to stop. f may not be invoked if stop channel is already
 // closed. Pass NeverStop to if you don't want it stop.
 func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding bool, stopCh <-chan struct{}) {
-	var t *time.Timer
-	var sawTimeout bool
+	BackoffUntil(f, NewJitteredBackoffManager(period, jitterFactor, &clock.RealClock{}), sliding, stopCh)
+}
 
+// BackoffUntil loops until the stop channel is closed, running f every duration given by BackoffManager.
+//
+// If sliding is true, the period is computed after f runs. If it is false then
+// period includes the runtime for f.
+func BackoffUntil(f func(), backoff BackoffManager, sliding bool, stopCh <-chan struct{}) {
+	var t clock.Timer
 	for {
 		select {
 		case <-stopCh:
@@ -138,13 +146,8 @@ func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding b
 		default:
 		}
 
-		jitteredPeriod := period
-		if jitterFactor > 0.0 {
-			jitteredPeriod = Jitter(period, jitterFactor)
-		}
-
 		if !sliding {
-			t = resetOrReuseTimer(t, jitteredPeriod, sawTimeout)
+			t = backoff.Backoff()
 		}
 
 		func() {
@@ -153,7 +156,7 @@ func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding b
 		}()
 
 		if sliding {
-			t = resetOrReuseTimer(t, jitteredPeriod, sawTimeout)
+			t = backoff.Backoff()
 		}
 
 		// NOTE: b/c there is no priority selection in golang
@@ -164,8 +167,7 @@ func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding b
 		select {
 		case <-stopCh:
 			return
-		case <-t.C:
-			sawTimeout = true
+		case <-t.C():
 		}
 	}
 }
@@ -203,6 +205,12 @@ var ErrWaitTimeout = errors.New("timed out waiting for the condition")
 // if the loop should be aborted.
 type ConditionFunc func() (done bool, err error)
 
+// runConditionWithCrashProtection runs a ConditionFunc with crash protection
+func runConditionWithCrashProtection(condition ConditionFunc) (bool, error) {
+	defer runtime.HandleCrash()
+	return condition()
+}
+
 // Backoff holds parameters applied to a Backoff function.
 type Backoff struct {
 	// The initial duration.
@@ -277,6 +285,92 @@ func contextForChannel(parentCh <-chan struct{}) (context.Context, context.Cance
 	return ctx, cancel
 }
 
+// BackoffManager manages backoff with a particular scheme based on its underlying implementation. It provides
+// an interface to return a timer for backoff, and the caller shall back off until the timer's channel fires. If a second
+// Backoff() is called before the timer from the first Backoff() call finishes, the first timer will NOT be drained.
+// The BackoffManager is supposed to be called in a single-threaded environment.
+type BackoffManager interface {
+	Backoff() clock.Timer
+}
+
+type exponentialBackoffManagerImpl struct {
+	backoff              *Backoff
+	backoffTimer         clock.Timer
+	lastBackoffStart     time.Time
+	initialBackoff       time.Duration
+	backoffResetDuration time.Duration
+	clock                clock.Clock
+}
+
+// NewExponentialBackoffManager returns a manager for managing exponential backoff. Each backoff is jittered and
+// will not exceed the given max. If Backoff() is not called within resetDuration, the backoff is reset.
+// This backoff manager is used to reduce load during upstream unhealthiness.
+func NewExponentialBackoffManager(initBackoff, maxBackoff, resetDuration time.Duration, backoffFactor, jitter float64, c clock.Clock) BackoffManager {
+	return &exponentialBackoffManagerImpl{
+		backoff: &Backoff{
+			Duration: initBackoff,
+			Factor:   backoffFactor,
+			Jitter:   jitter,
+
+			// the current impl of wait.Backoff returns Backoff.Duration once steps are used up, which is not
+			// what we ideally need here, we set it to max int and assume we will never use up the steps
+			Steps: math.MaxInt32,
+			Cap:   maxBackoff,
+		},
+		backoffTimer:         c.NewTimer(0),
+		initialBackoff:       initBackoff,
+		lastBackoffStart:     c.Now(),
+		backoffResetDuration: resetDuration,
+		clock:                c,
+	}
+}
+
+func (b *exponentialBackoffManagerImpl) getNextBackoff() time.Duration {
+	if b.clock.Now().Sub(b.lastBackoffStart) > b.backoffResetDuration {
+		b.backoff.Steps = math.MaxInt32
+		b.backoff.Duration = b.initialBackoff
+	}
+	b.lastBackoffStart = b.clock.Now()
+	return b.backoff.Step()
+}
+
+// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for backoff.
+func (b *exponentialBackoffManagerImpl) Backoff() clock.Timer {
+	b.backoffTimer.Reset(b.getNextBackoff())
+	return b.backoffTimer
+}
+
+type jitteredBackoffManagerImpl struct {
+	clock        clock.Clock
+	duration     time.Duration
+	jitter       float64
+	backoffTimer clock.Timer
+}
+
+// NewJitteredBackoffManager returns a BackoffManager that backs off for the given duration plus the given jitter. If the jitter
+// is negative, the backoff will not be jittered.
+func NewJitteredBackoffManager(duration time.Duration, jitter float64, c clock.Clock) BackoffManager {
+	return &jitteredBackoffManagerImpl{
+		clock:        c,
+		duration:     duration,
+		jitter:       jitter,
+		backoffTimer: c.NewTimer(0),
+	}
+}
+
+func (j *jitteredBackoffManagerImpl) getNextBackoff() time.Duration {
+	jitteredPeriod := j.duration
+	if j.jitter > 0.0 {
+		jitteredPeriod = Jitter(j.duration, j.jitter)
+	}
+	return jitteredPeriod
+}
+
+func (j *jitteredBackoffManagerImpl) Backoff() clock.Timer {
+	j.backoffTimer.Reset(j.getNextBackoff())
+	return j.backoffTimer
+}
+
 // ExponentialBackoff repeats a condition check with exponential backoff.
 //
 // It repeatedly checks the condition and then sleeps, using `backoff.Step()`
@@ -289,7 +383,7 @@ func contextForChannel(parentCh <-chan struct{}) (context.Context, context.Cance
 // In all other cases, ErrWaitTimeout is returned.
 func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error {
 	for backoff.Steps > 0 {
-		if ok, err := condition(); err != nil || ok {
+		if ok, err := runConditionWithCrashProtection(condition); err != nil || ok {
 			return err
 		}
 		if backoff.Steps == 1 {
@@ -335,7 +429,7 @@ func PollImmediate(interval, timeout time.Duration, condition ConditionFunc) err
 }
 
 func pollImmediateInternal(wait WaitFunc, condition ConditionFunc) error {
-	done, err := condition()
+	done, err := runConditionWithCrashProtection(condition)
 	if err != nil {
 		return err
 	}
@@ -364,7 +458,7 @@ func PollInfinite(interval time.Duration, condition ConditionFunc) error {
 // Some intervals may be missed if the condition takes too long or the time
 // window is too short.
 func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) error {
-	done, err := condition()
+	done, err := runConditionWithCrashProtection(condition)
 	if err != nil {
 		return err
 	}
@@ -431,7 +525,7 @@ func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error {
 	for {
 		select {
 		case _, open := <-c:
-			ok, err := fn()
+			ok, err := runConditionWithCrashProtection(fn)
 			if err != nil {
 				return err
 			}
@@ -497,16 +591,3 @@ func poller(interval, timeout time.Duration) WaitFunc {
 		return ch
 	})
 }
-
-// resetOrReuseTimer avoids allocating a new timer if one is already in use.
-// Not safe for multiple threads.
-func resetOrReuseTimer(t *time.Timer, d time.Duration, sawTimeout bool) *time.Timer {
-	if t == nil {
-		return time.NewTimer(d)
-	}
-	if !t.Stop() && !sawTimeout {
-		<-t.C
-	}
-	t.Reset(d)
-	return t
-}
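A rough sketch of the new BackoffManager plumbing (durations and the retry body are made up): BackoffUntil asks the manager for a timer on every iteration, so the same loop works for both the jittered and the exponential policies.

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/clock"
	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	stopCh := make(chan struct{})
	go func() {
		time.Sleep(5 * time.Second)
		close(stopCh)
	}()

	// Start at 100ms, double each time (factor 2.0, 10% jitter), cap at 2s, and
	// reset to the initial backoff if Backoff() has not been called for a minute.
	manager := wait.NewExponentialBackoffManager(
		100*time.Millisecond, 2*time.Second, time.Minute, 2.0, 0.1, &clock.RealClock{})

	wait.BackoffUntil(func() {
		fmt.Println("attempting to reach upstream...")
	}, manager, true, stopCh)
}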
diff --git a/vendor/k8s.io/apimachinery/pkg/watch/watch.go b/vendor/k8s.io/apimachinery/pkg/watch/watch.go
index 3945be3a..988aba3e 100644
--- a/vendor/k8s.io/apimachinery/pkg/watch/watch.go
+++ b/vendor/k8s.io/apimachinery/pkg/watch/watch.go
@@ -90,7 +90,7 @@ func (w emptyWatch) ResultChan() <-chan Event {
 // FakeWatcher lets you test anything that consumes a watch.Interface; threadsafe.
 type FakeWatcher struct {
 	result  chan Event
-	Stopped bool
+	stopped bool
 	sync.Mutex
 }
 
@@ -110,24 +110,24 @@ func NewFakeWithChanSize(size int, blocking bool) *FakeWatcher {
 func (f *FakeWatcher) Stop() {
 	f.Lock()
 	defer f.Unlock()
-	if !f.Stopped {
+	if !f.stopped {
 		klog.V(4).Infof("Stopping fake watcher.")
 		close(f.result)
-		f.Stopped = true
+		f.stopped = true
 	}
 }
 
 func (f *FakeWatcher) IsStopped() bool {
 	f.Lock()
 	defer f.Unlock()
-	return f.Stopped
+	return f.stopped
 }
 
 // Reset prepares the watcher to be reused.
 func (f *FakeWatcher) Reset() {
 	f.Lock()
 	defer f.Unlock()
-	f.Stopped = false
+	f.stopped = false
 	f.result = make(chan Event)
 }
 
diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/netutil/addr.go b/vendor/k8s.io/apimachinery/third_party/forked/golang/netutil/addr.go
new file mode 100644
index 00000000..c70f431c
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/netutil/addr.go
@@ -0,0 +1,27 @@
+package netutil
+
+import (
+	"net/url"
+	"strings"
+)
+
+// FROM: http://golang.org/src/net/http/client.go
+// Given a string of the form "host", "host:port", or "[ipv6::address]:port",
+// return true if the string includes a port.
+func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") }
+
+// FROM: http://golang.org/src/net/http/transport.go
+var portMap = map[string]string{
+	"http":  "80",
+	"https": "443",
+}
+
+// FROM: http://golang.org/src/net/http/transport.go
+// CanonicalAddr returns url.Host but always with a ":port" suffix
+func CanonicalAddr(url *url.URL) string {
+	addr := url.Host
+	if !hasPort(addr) {
+		return addr + ":" + portMap[url.Scheme]
+	}
+	return addr
+}
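A usage sketch of CanonicalAddr (hosts are examples):

package main

import (
	"fmt"
	"net/url"

	"k8s.io/apimachinery/third_party/forked/golang/netutil"
)

func main() {
	withPort, _ := url.Parse("http://example.com:8001")
	withoutPort, _ := url.Parse("https://example.com/api")

	fmt.Println(netutil.CanonicalAddr(withPort))    // example.com:8001
	fmt.Println(netutil.CanonicalAddr(withoutPort)) // example.com:443, default port taken from the scheme
}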
diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/types.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/types.go
index d21e7d63..4e414944 100644
--- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/types.go
+++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/types.go
@@ -71,29 +71,78 @@ type EgressSelection struct {
 
 // Connection provides the configuration for a single egress selection client.
 type Connection struct {
-	// Type is the type of connection used to connect from client to konnectivity server.
-	// Currently supported values are "http-connect" and "direct".
-	Type string
+	// Protocol is the protocol used to connect from client to the konnectivity server.
+	ProxyProtocol ProtocolType
 
-	// httpConnect is the config needed to use http-connect to the konnectivity server.
+	// Transport defines the transport configurations we use to dial to the konnectivity server.
+	// This is required if ProxyProtocol is HTTPConnect or GRPC.
 	// +optional
-	HTTPConnect *HTTPConnectConfig
+	Transport *Transport
 }
 
-type HTTPConnectConfig struct {
+// ProtocolType is a set of valid values for Connection.ProxyProtocol
+type ProtocolType string
+
+// Valid types for ProtocolType for konnectivity server
+const (
+	// Use HTTPConnect to connect to konnectivity server
+	ProtocolHTTPConnect ProtocolType = "HTTPConnect"
+	// Use grpc to connect to konnectivity server
+	ProtocolGRPC ProtocolType = "GRPC"
+	// Connect directly (skip konnectivity server)
+	ProtocolDirect ProtocolType = "Direct"
+)
+
+// Transport defines the transport configurations we use to dial to the konnectivity server
+type Transport struct {
+	// TCP is the TCP configuration for communicating with the konnectivity server via TCP
+	// ProxyProtocol of GRPC is not supported with TCP transport at the moment
+	// Requires at least one of TCP or UDS to be set
+	// +optional
+	TCP *TCPTransport
+
+	// UDS is the UDS configuration for communicating with the konnectivity server via UDS
+	// Requires at least one of TCP or UDS to be set
+	// +optional
+	UDS *UDSTransport
+}
+
+// TCPTransport provides the information to connect to konnectivity server via TCP
+type TCPTransport struct {
 	// URL is the location of the konnectivity server to connect to.
 	// As an example it might be "https://127.0.0.1:8131"
 	URL string
 
-	// CABundle is the file location of the CA to be used to determine trust with the konnectivity server.
+	// TLSConfig is the config needed to use TLS when connecting to konnectivity server
+	// +optional
+	TLSConfig *TLSConfig
+}
+
+// UDSTransport provides the information to connect to konnectivity server via UDS
+type UDSTransport struct {
+	// UDSName is the name of the unix domain socket to connect to konnectivity server
+	// This does not use a unix:// prefix. (Eg: /etc/srv/kubernetes/konnectivity-server/konnectivity-server.socket)
+	UDSName string
+}
+
+// TLSConfig provides the authentication information to connect to konnectivity server
+// Only used with TCPTransport
+type TLSConfig struct {
+	// caBundle is the file location of the CA to be used to determine trust with the konnectivity server.
+	// Must be absent/empty if TCPTransport.URL is prefixed with http://
+	// If absent while TCPTransport.URL is prefixed with https://, default to system trust roots.
 	// +optional
 	CABundle string
 
-	// ClientKey is the file location of the client key to be used in mtls handshakes with the konnectivity server.
+	// clientKey is the file location of the client key to authenticate with the konnectivity server
+	// Must be absent/empty if TCPTransport.URL is prefixed with http://
+	// Must be configured if TCPTransport.URL is prefixed with https://
 	// +optional
 	ClientKey string
 
-	// ClientCert is the file location of the client certificate to be used in mtls handshakes with the konnectivity server.
+	// clientCert is the file location of the client certificate to authenticate with the konnectivity server
+	// Must be absent/empty if TCPTransport.URL is prefixed with http://
+	// Must be configured if TCPTransport.URL is prefixed with https://
 	// +optional
 	ClientCert string
 }
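For illustration, the new shape of an egress selection Connection might be populated like this; the TLS file paths are made up, and the socket path is taken from the UDSName field comment above:

package main

import (
	"fmt"

	"k8s.io/apiserver/pkg/apis/apiserver"
)

func main() {
	// GRPC to the konnectivity server over a unix domain socket.
	grpcOverUDS := apiserver.Connection{
		ProxyProtocol: apiserver.ProtocolGRPC,
		Transport: &apiserver.Transport{
			UDS: &apiserver.UDSTransport{
				UDSName: "/etc/srv/kubernetes/konnectivity-server/konnectivity-server.socket",
			},
		},
	}

	// HTTPConnect to the konnectivity server over TCP with mutual TLS
	// (the certificate paths are made up for this sketch).
	httpConnectOverTCP := apiserver.Connection{
		ProxyProtocol: apiserver.ProtocolHTTPConnect,
		Transport: &apiserver.Transport{
			TCP: &apiserver.TCPTransport{
				URL: "https://127.0.0.1:8131",
				TLSConfig: &apiserver.TLSConfig{
					CABundle:   "/etc/kubernetes/konnectivity/ca.crt",
					ClientCert: "/etc/kubernetes/konnectivity/client.crt",
					ClientKey:  "/etc/kubernetes/konnectivity/client.key",
				},
			},
		},
	}

	fmt.Println(grpcOverUDS.ProxyProtocol, httpConnectOverTCP.ProxyProtocol) // GRPC HTTPConnect
}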
diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go
index 10034d7c..7b9aacae 100644
--- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go
+++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go
@@ -71,40 +71,78 @@ type EgressSelection struct {
 
 // Connection provides the configuration for a single egress selection client.
 type Connection struct {
-	// type is the type of connection used to connect from client to network/konnectivity server.
-	// Currently supported values are "http-connect" and "direct".
-	Type string `json:"type"`
+	// Protocol is the protocol used to connect from client to the konnectivity server.
+	ProxyProtocol ProtocolType `json:"proxyProtocol,omitempty"`
 
-	// httpConnect is the config needed to use http-connect to the konnectivity server.
-	// Absence when the type is "http-connect" will cause an error
-	// Presence when the type is "direct" will also cause an error
+	// Transport defines the transport configurations we use to dial to the konnectivity server.
+	// This is required if ProxyProtocol is HTTPConnect or GRPC.
 	// +optional
-	HTTPConnect *HTTPConnectConfig `json:"httpConnect,omitempty"`
+	Transport *Transport `json:"transport,omitempty"`
 }
 
-type HTTPConnectConfig struct {
-	// url is the location of the proxy server to connect to.
-	// As an example it might be "https://127.0.0.1:8131"
-	URL string `json:"url"`
+// ProtocolType is a set of valid values for Connection.ProxyProtocol
+type ProtocolType string
 
+// Valid types for ProtocolType for konnectivity server
+const (
+	// Use HTTPConnect to connect to konnectivity server
+	ProtocolHTTPConnect ProtocolType = "HTTPConnect"
+	// Use grpc to connect to konnectivity server
+	ProtocolGRPC ProtocolType = "GRPC"
+	// Connect directly (skip konnectivity server)
+	ProtocolDirect ProtocolType = "Direct"
+)
+
+// Transport defines the transport configurations we use to dial to the konnectivity server
+type Transport struct {
+	// TCP is the TCP configuration for communicating with the konnectivity server via TCP
+	// ProxyProtocol of GRPC is not supported with TCP transport at the moment
+	// Requires at least one of TCP or UDS to be set
+	// +optional
+	TCP *TCPTransport `json:"tcp,omitempty"`
+
+	// UDS is the UDS configuration for communicating with the konnectivity server via UDS
+	// Requires at least one of TCP or UDS to be set
+	// +optional
+	UDS *UDSTransport `json:"uds,omitempty"`
+}
+
+// TCPTransport provides the information to connect to konnectivity server via TCP
+type TCPTransport struct {
+	// URL is the location of the konnectivity server to connect to.
+	// As an example it might be "https://127.0.0.1:8131"
+	URL string `json:"url,omitempty"`
+
+	// TLSConfig is the config needed to use TLS when connecting to konnectivity server
+	// +optional
+	TLSConfig *TLSConfig `json:"tlsConfig,omitempty"`
+}
+
+// UDSTransport provides the information to connect to konnectivity server via UDS
+type UDSTransport struct {
+	// UDSName is the name of the unix domain socket to connect to konnectivity server
+	// This does not use a unix:// prefix. (Eg: /etc/srv/kubernetes/konnectivity-server/konnectivity-server.socket)
+	UDSName string `json:"udsName,omitempty"`
+}
+
+// TLSConfig provides the authentication information to connect to konnectivity server
+// Only used with TCPTransport
+type TLSConfig struct {
 	// caBundle is the file location of the CA to be used to determine trust with the konnectivity server.
-	// Must be absent/empty http-connect using the plain http
-	// Must be configured for http-connect using the https protocol
-	// Misconfiguration will cause an error
+	// Must be absent/empty if TCPTransport.URL is prefixed with http://
+	// If absent while TCPTransport.URL is prefixed with https://, default to system trust roots.
 	// +optional
 	CABundle string `json:"caBundle,omitempty"`
 
 	// clientKey is the file location of the client key to be used in mtls handshakes with the konnectivity server.
-	// Must be absent/empty http-connect using the plain http
-	// Must be configured for http-connect using the https protocol
-	// Misconfiguration will cause an error
+	// Must be absent/empty if TCPTransport.URL is prefixed with http://
+	// Must be configured if TCPTransport.URL is prefixed with https://
 	// +optional
 	ClientKey string `json:"clientKey,omitempty"`
 
 	// clientCert is the file location of the client certificate to be used in mtls handshakes with the konnectivity server.
-	// Must be absent/empty http-connect using the plain http
-	// Must be configured for http-connect using the https protocol
-	// Misconfiguration will cause an error
+	// Must be absent/empty if TCPTransport.URL is prefixed with http://
+	// Must be configured if TCPTransport.URL is prefixed with https://
 	// +optional
 	ClientCert string `json:"clientCert,omitempty"`
 }
diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go
index 80352f02..9174b16d 100644
--- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go
+++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go
@@ -85,13 +85,43 @@ func RegisterConversions(s *runtime.Scheme) error {
 	}); err != nil {
 		return err
 	}
-	if err := s.AddGeneratedConversionFunc((*HTTPConnectConfig)(nil), (*apiserver.HTTPConnectConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_HTTPConnectConfig_To_apiserver_HTTPConnectConfig(a.(*HTTPConnectConfig), b.(*apiserver.HTTPConnectConfig), scope)
+	if err := s.AddGeneratedConversionFunc((*TCPTransport)(nil), (*apiserver.TCPTransport)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_TCPTransport_To_apiserver_TCPTransport(a.(*TCPTransport), b.(*apiserver.TCPTransport), scope)
 	}); err != nil {
 		return err
 	}
-	if err := s.AddGeneratedConversionFunc((*apiserver.HTTPConnectConfig)(nil), (*HTTPConnectConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_apiserver_HTTPConnectConfig_To_v1alpha1_HTTPConnectConfig(a.(*apiserver.HTTPConnectConfig), b.(*HTTPConnectConfig), scope)
+	if err := s.AddGeneratedConversionFunc((*apiserver.TCPTransport)(nil), (*TCPTransport)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_TCPTransport_To_v1alpha1_TCPTransport(a.(*apiserver.TCPTransport), b.(*TCPTransport), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*TLSConfig)(nil), (*apiserver.TLSConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_TLSConfig_To_apiserver_TLSConfig(a.(*TLSConfig), b.(*apiserver.TLSConfig), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.TLSConfig)(nil), (*TLSConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_TLSConfig_To_v1alpha1_TLSConfig(a.(*apiserver.TLSConfig), b.(*TLSConfig), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*Transport)(nil), (*apiserver.Transport)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_Transport_To_apiserver_Transport(a.(*Transport), b.(*apiserver.Transport), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.Transport)(nil), (*Transport)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_Transport_To_v1alpha1_Transport(a.(*apiserver.Transport), b.(*Transport), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*UDSTransport)(nil), (*apiserver.UDSTransport)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_UDSTransport_To_apiserver_UDSTransport(a.(*UDSTransport), b.(*apiserver.UDSTransport), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.UDSTransport)(nil), (*UDSTransport)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_UDSTransport_To_v1alpha1_UDSTransport(a.(*apiserver.UDSTransport), b.(*UDSTransport), scope)
 	}); err != nil {
 		return err
 	}
@@ -143,8 +173,8 @@ func Convert_apiserver_AdmissionPluginConfiguration_To_v1alpha1_AdmissionPluginC
 }
 
 func autoConvert_v1alpha1_Connection_To_apiserver_Connection(in *Connection, out *apiserver.Connection, s conversion.Scope) error {
-	out.Type = in.Type
-	out.HTTPConnect = (*apiserver.HTTPConnectConfig)(unsafe.Pointer(in.HTTPConnect))
+	out.ProxyProtocol = apiserver.ProtocolType(in.ProxyProtocol)
+	out.Transport = (*apiserver.Transport)(unsafe.Pointer(in.Transport))
 	return nil
 }
 
@@ -154,8 +184,8 @@ func Convert_v1alpha1_Connection_To_apiserver_Connection(in *Connection, out *ap
 }
 
 func autoConvert_apiserver_Connection_To_v1alpha1_Connection(in *apiserver.Connection, out *Connection, s conversion.Scope) error {
-	out.Type = in.Type
-	out.HTTPConnect = (*HTTPConnectConfig)(unsafe.Pointer(in.HTTPConnect))
+	out.ProxyProtocol = ProtocolType(in.ProxyProtocol)
+	out.Transport = (*Transport)(unsafe.Pointer(in.Transport))
 	return nil
 }
 
@@ -210,28 +240,90 @@ func Convert_apiserver_EgressSelectorConfiguration_To_v1alpha1_EgressSelectorCon
 	return autoConvert_apiserver_EgressSelectorConfiguration_To_v1alpha1_EgressSelectorConfiguration(in, out, s)
 }
 
-func autoConvert_v1alpha1_HTTPConnectConfig_To_apiserver_HTTPConnectConfig(in *HTTPConnectConfig, out *apiserver.HTTPConnectConfig, s conversion.Scope) error {
+func autoConvert_v1alpha1_TCPTransport_To_apiserver_TCPTransport(in *TCPTransport, out *apiserver.TCPTransport, s conversion.Scope) error {
 	out.URL = in.URL
+	out.TLSConfig = (*apiserver.TLSConfig)(unsafe.Pointer(in.TLSConfig))
+	return nil
+}
+
+// Convert_v1alpha1_TCPTransport_To_apiserver_TCPTransport is an autogenerated conversion function.
+func Convert_v1alpha1_TCPTransport_To_apiserver_TCPTransport(in *TCPTransport, out *apiserver.TCPTransport, s conversion.Scope) error {
+	return autoConvert_v1alpha1_TCPTransport_To_apiserver_TCPTransport(in, out, s)
+}
+
+func autoConvert_apiserver_TCPTransport_To_v1alpha1_TCPTransport(in *apiserver.TCPTransport, out *TCPTransport, s conversion.Scope) error {
+	out.URL = in.URL
+	out.TLSConfig = (*TLSConfig)(unsafe.Pointer(in.TLSConfig))
+	return nil
+}
+
+// Convert_apiserver_TCPTransport_To_v1alpha1_TCPTransport is an autogenerated conversion function.
+func Convert_apiserver_TCPTransport_To_v1alpha1_TCPTransport(in *apiserver.TCPTransport, out *TCPTransport, s conversion.Scope) error {
+	return autoConvert_apiserver_TCPTransport_To_v1alpha1_TCPTransport(in, out, s)
+}
+
+func autoConvert_v1alpha1_TLSConfig_To_apiserver_TLSConfig(in *TLSConfig, out *apiserver.TLSConfig, s conversion.Scope) error {
 	out.CABundle = in.CABundle
 	out.ClientKey = in.ClientKey
 	out.ClientCert = in.ClientCert
 	return nil
 }
 
-// Convert_v1alpha1_HTTPConnectConfig_To_apiserver_HTTPConnectConfig is an autogenerated conversion function.
-func Convert_v1alpha1_HTTPConnectConfig_To_apiserver_HTTPConnectConfig(in *HTTPConnectConfig, out *apiserver.HTTPConnectConfig, s conversion.Scope) error {
-	return autoConvert_v1alpha1_HTTPConnectConfig_To_apiserver_HTTPConnectConfig(in, out, s)
+// Convert_v1alpha1_TLSConfig_To_apiserver_TLSConfig is an autogenerated conversion function.
+func Convert_v1alpha1_TLSConfig_To_apiserver_TLSConfig(in *TLSConfig, out *apiserver.TLSConfig, s conversion.Scope) error {
+	return autoConvert_v1alpha1_TLSConfig_To_apiserver_TLSConfig(in, out, s)
 }
 
-func autoConvert_apiserver_HTTPConnectConfig_To_v1alpha1_HTTPConnectConfig(in *apiserver.HTTPConnectConfig, out *HTTPConnectConfig, s conversion.Scope) error {
-	out.URL = in.URL
+func autoConvert_apiserver_TLSConfig_To_v1alpha1_TLSConfig(in *apiserver.TLSConfig, out *TLSConfig, s conversion.Scope) error {
 	out.CABundle = in.CABundle
 	out.ClientKey = in.ClientKey
 	out.ClientCert = in.ClientCert
 	return nil
 }
 
-// Convert_apiserver_HTTPConnectConfig_To_v1alpha1_HTTPConnectConfig is an autogenerated conversion function.
-func Convert_apiserver_HTTPConnectConfig_To_v1alpha1_HTTPConnectConfig(in *apiserver.HTTPConnectConfig, out *HTTPConnectConfig, s conversion.Scope) error {
-	return autoConvert_apiserver_HTTPConnectConfig_To_v1alpha1_HTTPConnectConfig(in, out, s)
+// Convert_apiserver_TLSConfig_To_v1alpha1_TLSConfig is an autogenerated conversion function.
+func Convert_apiserver_TLSConfig_To_v1alpha1_TLSConfig(in *apiserver.TLSConfig, out *TLSConfig, s conversion.Scope) error {
+	return autoConvert_apiserver_TLSConfig_To_v1alpha1_TLSConfig(in, out, s)
+}
+
+func autoConvert_v1alpha1_Transport_To_apiserver_Transport(in *Transport, out *apiserver.Transport, s conversion.Scope) error {
+	out.TCP = (*apiserver.TCPTransport)(unsafe.Pointer(in.TCP))
+	out.UDS = (*apiserver.UDSTransport)(unsafe.Pointer(in.UDS))
+	return nil
+}
+
+// Convert_v1alpha1_Transport_To_apiserver_Transport is an autogenerated conversion function.
+func Convert_v1alpha1_Transport_To_apiserver_Transport(in *Transport, out *apiserver.Transport, s conversion.Scope) error {
+	return autoConvert_v1alpha1_Transport_To_apiserver_Transport(in, out, s)
+}
+
+func autoConvert_apiserver_Transport_To_v1alpha1_Transport(in *apiserver.Transport, out *Transport, s conversion.Scope) error {
+	out.TCP = (*TCPTransport)(unsafe.Pointer(in.TCP))
+	out.UDS = (*UDSTransport)(unsafe.Pointer(in.UDS))
+	return nil
+}
+
+// Convert_apiserver_Transport_To_v1alpha1_Transport is an autogenerated conversion function.
+func Convert_apiserver_Transport_To_v1alpha1_Transport(in *apiserver.Transport, out *Transport, s conversion.Scope) error {
+	return autoConvert_apiserver_Transport_To_v1alpha1_Transport(in, out, s)
+}
+
+func autoConvert_v1alpha1_UDSTransport_To_apiserver_UDSTransport(in *UDSTransport, out *apiserver.UDSTransport, s conversion.Scope) error {
+	out.UDSName = in.UDSName
+	return nil
+}
+
+// Convert_v1alpha1_UDSTransport_To_apiserver_UDSTransport is an autogenerated conversion function.
+func Convert_v1alpha1_UDSTransport_To_apiserver_UDSTransport(in *UDSTransport, out *apiserver.UDSTransport, s conversion.Scope) error {
+	return autoConvert_v1alpha1_UDSTransport_To_apiserver_UDSTransport(in, out, s)
+}
+
+func autoConvert_apiserver_UDSTransport_To_v1alpha1_UDSTransport(in *apiserver.UDSTransport, out *UDSTransport, s conversion.Scope) error {
+	out.UDSName = in.UDSName
+	return nil
+}
+
+// Convert_apiserver_UDSTransport_To_v1alpha1_UDSTransport is an autogenerated conversion function.
+func Convert_apiserver_UDSTransport_To_v1alpha1_UDSTransport(in *apiserver.UDSTransport, out *UDSTransport, s conversion.Scope) error {
+	return autoConvert_apiserver_UDSTransport_To_v1alpha1_UDSTransport(in, out, s)
 }
diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go
index e8d60867..4498e408 100644
--- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go
@@ -80,10 +80,10 @@ func (in *AdmissionPluginConfiguration) DeepCopy() *AdmissionPluginConfiguration
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *Connection) DeepCopyInto(out *Connection) {
 	*out = *in
-	if in.HTTPConnect != nil {
-		in, out := &in.HTTPConnect, &out.HTTPConnect
-		*out = new(HTTPConnectConfig)
-		**out = **in
+	if in.Transport != nil {
+		in, out := &in.Transport, &out.Transport
+		*out = new(Transport)
+		(*in).DeepCopyInto(*out)
 	}
 	return
 }
@@ -148,17 +148,80 @@ func (in *EgressSelectorConfiguration) DeepCopyObject() runtime.Object {
 }
 
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *HTTPConnectConfig) DeepCopyInto(out *HTTPConnectConfig) {
+func (in *TCPTransport) DeepCopyInto(out *TCPTransport) {
+	*out = *in
+	if in.TLSConfig != nil {
+		in, out := &in.TLSConfig, &out.TLSConfig
+		*out = new(TLSConfig)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPTransport.
+func (in *TCPTransport) DeepCopy() *TCPTransport {
+	if in == nil {
+		return nil
+	}
+	out := new(TCPTransport)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TLSConfig) DeepCopyInto(out *TLSConfig) {
 	*out = *in
 	return
 }
 
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPConnectConfig.
-func (in *HTTPConnectConfig) DeepCopy() *HTTPConnectConfig {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSConfig.
+func (in *TLSConfig) DeepCopy() *TLSConfig {
 	if in == nil {
 		return nil
 	}
-	out := new(HTTPConnectConfig)
+	out := new(TLSConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Transport) DeepCopyInto(out *Transport) {
+	*out = *in
+	if in.TCP != nil {
+		in, out := &in.TCP, &out.TCP
+		*out = new(TCPTransport)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.UDS != nil {
+		in, out := &in.UDS, &out.UDS
+		*out = new(UDSTransport)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Transport.
+func (in *Transport) DeepCopy() *Transport {
+	if in == nil {
+		return nil
+	}
+	out := new(Transport)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UDSTransport) DeepCopyInto(out *UDSTransport) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UDSTransport.
+func (in *UDSTransport) DeepCopy() *UDSTransport {
+	if in == nil {
+		return nil
+	}
+	out := new(UDSTransport)
 	in.DeepCopyInto(out)
 	return out
 }
diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go
index 3159f7c1..622f1b5d 100644
--- a/vendor/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go
@@ -80,10 +80,10 @@ func (in *AdmissionPluginConfiguration) DeepCopy() *AdmissionPluginConfiguration
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *Connection) DeepCopyInto(out *Connection) {
 	*out = *in
-	if in.HTTPConnect != nil {
-		in, out := &in.HTTPConnect, &out.HTTPConnect
-		*out = new(HTTPConnectConfig)
-		**out = **in
+	if in.Transport != nil {
+		in, out := &in.Transport, &out.Transport
+		*out = new(Transport)
+		(*in).DeepCopyInto(*out)
 	}
 	return
 }
@@ -148,17 +148,80 @@ func (in *EgressSelectorConfiguration) DeepCopyObject() runtime.Object {
 }
 
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *HTTPConnectConfig) DeepCopyInto(out *HTTPConnectConfig) {
+func (in *TCPTransport) DeepCopyInto(out *TCPTransport) {
+	*out = *in
+	if in.TLSConfig != nil {
+		in, out := &in.TLSConfig, &out.TLSConfig
+		*out = new(TLSConfig)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPTransport.
+func (in *TCPTransport) DeepCopy() *TCPTransport {
+	if in == nil {
+		return nil
+	}
+	out := new(TCPTransport)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TLSConfig) DeepCopyInto(out *TLSConfig) {
 	*out = *in
 	return
 }
 
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPConnectConfig.
-func (in *HTTPConnectConfig) DeepCopy() *HTTPConnectConfig {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSConfig.
+func (in *TLSConfig) DeepCopy() *TLSConfig {
 	if in == nil {
 		return nil
 	}
-	out := new(HTTPConnectConfig)
+	out := new(TLSConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Transport) DeepCopyInto(out *Transport) {
+	*out = *in
+	if in.TCP != nil {
+		in, out := &in.TCP, &out.TCP
+		*out = new(TCPTransport)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.UDS != nil {
+		in, out := &in.UDS, &out.UDS
+		*out = new(UDSTransport)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Transport.
+func (in *Transport) DeepCopy() *Transport {
+	if in == nil {
+		return nil
+	}
+	out := new(Transport)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UDSTransport) DeepCopyInto(out *UDSTransport) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UDSTransport.
+func (in *UDSTransport) DeepCopy() *UDSTransport {
+	if in == nil {
+		return nil
+	}
+	out := new(UDSTransport)
 	in.DeepCopyInto(out)
 	return out
 }
diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.pb.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.pb.go
index 6f5cb522..3d2f4441 100644
--- a/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.pb.go
+++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.pb.go
@@ -47,7 +47,7 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
 func (m *Event) Reset()      { *m = Event{} }
 func (*Event) ProtoMessage() {}
@@ -3101,6 +3101,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error {
 func skipGenerated(dAtA []byte) (n int, err error) {
 	l := len(dAtA)
 	iNdEx := 0
+	depth := 0
 	for iNdEx < l {
 		var wire uint64
 		for shift := uint(0); ; shift += 7 {
@@ -3132,10 +3133,8 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 					break
 				}
 			}
-			return iNdEx, nil
 		case 1:
 			iNdEx += 8
-			return iNdEx, nil
 		case 2:
 			var length int
 			for shift := uint(0); ; shift += 7 {
@@ -3156,55 +3155,30 @@ func skipGenerated(dAtA []byte) (n int, err error) {
 				return 0, ErrInvalidLengthGenerated
 			}
 			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthGenerated
-			}
-			return iNdEx, nil
 		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowGenerated
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipGenerated(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthGenerated
-				}
-			}
-			return iNdEx, nil
+			depth++
 		case 4:
-			return iNdEx, nil
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
 		case 5:
 			iNdEx += 4
-			return iNdEx, nil
 		default:
 			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
 		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
 	}
-	panic("unreachable")
+	return 0, io.ErrUnexpectedEOF
 }
 
 var (
-	ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowGenerated   = fmt.Errorf("proto: integer overflow")
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
 )
diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/types.go b/vendor/k8s.io/apiserver/pkg/apis/config/types.go
index 5d4caaa5..5dddc97f 100644
--- a/vendor/k8s.io/apiserver/pkg/apis/config/types.go
+++ b/vendor/k8s.io/apiserver/pkg/apis/config/types.go
@@ -17,6 +17,8 @@ limitations under the License.
 package config
 
 import (
+	"fmt"
+
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
@@ -74,6 +76,11 @@ type Key struct {
 	Secret string
 }
 
+// String implements Stringer interface in a log safe way.
+func (k Key) String() string {
+	return fmt.Sprintf("Name: %s, Secret: [REDACTED]", k.Name)
+}
+
 // IdentityConfiguration is an empty struct to allow identity transformer in provider configuration.
 type IdentityConfiguration struct{}
 
@@ -81,12 +88,13 @@ type IdentityConfiguration struct{}
 type KMSConfiguration struct {
 	// name is the name of the KMS plugin to be used.
 	Name string
-	// cacheSize is the maximum number of secrets which are cached in memory. The default value is 1000.
+	// cachesize is the maximum number of secrets which are cached in memory. The default value is 1000.
+	// Set to a negative value to disable caching.
 	// +optional
-	CacheSize int32
+	CacheSize *int32
 	// endpoint is the gRPC server listening address, for example "unix:///var/run/kms-provider.sock".
 	Endpoint string
-	// Timeout for gRPC calls to kms-plugin (ex. 5s). The default is 3 seconds.
+	// timeout for gRPC calls to kms-plugin (ex. 5s). The default is 3 seconds.
 	// +optional
 	Timeout *metav1.Duration
 }
diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/v1/defaults.go b/vendor/k8s.io/apiserver/pkg/apis/config/v1/defaults.go
new file mode 100644
index 00000000..2d529651
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/apis/config/v1/defaults.go
@@ -0,0 +1,44 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"time"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+var (
+	defaultTimeout         = &metav1.Duration{Duration: 3 * time.Second}
+	defaultCacheSize int32 = 1000
+)
+
+func addDefaultingFuncs(scheme *runtime.Scheme) error {
+	return RegisterDefaults(scheme)
+}
+
+// SetDefaults_KMSConfiguration applies defaults to KMSConfiguration.
+func SetDefaults_KMSConfiguration(obj *KMSConfiguration) {
+	if obj.Timeout == nil {
+		obj.Timeout = defaultTimeout
+	}
+
+	if obj.CacheSize == nil {
+		obj.CacheSize = &defaultCacheSize
+	}
+}
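A small sketch of the defaulting behaviour (the plugin name is made up; the endpoint is the example from the field comment):

package main

import (
	"fmt"

	apiserverconfigv1 "k8s.io/apiserver/pkg/apis/config/v1"
)

func main() {
	kms := &apiserverconfigv1.KMSConfiguration{
		Name:     "my-kms-plugin", // hypothetical plugin name
		Endpoint: "unix:///var/run/kms-provider.sock",
	}
	apiserverconfigv1.SetDefaults_KMSConfiguration(kms)

	fmt.Println(*kms.CacheSize, kms.Timeout.Duration) // 1000 3s
}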
diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/v1/register.go b/vendor/k8s.io/apiserver/pkg/apis/config/v1/register.go
index 2e3ecfff..32b5634c 100644
--- a/vendor/k8s.io/apiserver/pkg/apis/config/v1/register.go
+++ b/vendor/k8s.io/apiserver/pkg/apis/config/v1/register.go
@@ -40,6 +40,7 @@ func init() {
 	// generated functions takes place in the generated files. The separation
 	// makes the code compile even when the generated files are missing.
 	localSchemeBuilder.Register(addKnownTypes)
+	localSchemeBuilder.Register(addDefaultingFuncs)
 }
 
 func addKnownTypes(scheme *runtime.Scheme) error {
diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/v1/types.go b/vendor/k8s.io/apiserver/pkg/apis/config/v1/types.go
index 1ac701bb..d7d68d25 100644
--- a/vendor/k8s.io/apiserver/pkg/apis/config/v1/types.go
+++ b/vendor/k8s.io/apiserver/pkg/apis/config/v1/types.go
@@ -17,6 +17,8 @@ limitations under the License.
 package v1
 
 import (
+	"fmt"
+
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
@@ -74,6 +76,11 @@ type Key struct {
 	Secret string `json:"secret"`
 }
 
+// String implements Stringer interface in a log safe way.
+func (k Key) String() string {
+	return fmt.Sprintf("Name: %s, Secret: [REDACTED]", k.Name)
+}
+
 // IdentityConfiguration is an empty struct to allow identity transformer in provider configuration.
 type IdentityConfiguration struct{}
 
@@ -81,12 +88,13 @@ type IdentityConfiguration struct{}
 type KMSConfiguration struct {
 	// name is the name of the KMS plugin to be used.
 	Name string `json:"name"`
-	// cacheSize is the maximum number of secrets which are cached in memory. The default value is 1000.
+	// cachesize is the maximum number of secrets which are cached in memory. The default value is 1000.
+	// Set to a negative value to disable caching.
 	// +optional
-	CacheSize int32 `json:"cachesize,omitempty"`
+	CacheSize *int32 `json:"cachesize,omitempty"`
 	// endpoint is the gRPC server listening address, for example "unix:///var/run/kms-provider.sock".
 	Endpoint string `json:"endpoint"`
-	// Timeout for gRPC calls to kms-plugin (ex. 5s). The default is 3 seconds.
+	// timeout for gRPC calls to kms-plugin (ex. 5s). The default is 3 seconds.
 	// +optional
 	Timeout *metav1.Duration `json:"timeout,omitempty"`
 }
diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.conversion.go b/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.conversion.go
index 4a6843df..c7de6539 100644
--- a/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.conversion.go
+++ b/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.conversion.go
@@ -179,7 +179,7 @@ func Convert_config_IdentityConfiguration_To_v1_IdentityConfiguration(in *config
 
 func autoConvert_v1_KMSConfiguration_To_config_KMSConfiguration(in *KMSConfiguration, out *config.KMSConfiguration, s conversion.Scope) error {
 	out.Name = in.Name
-	out.CacheSize = in.CacheSize
+	out.CacheSize = (*int32)(unsafe.Pointer(in.CacheSize))
 	out.Endpoint = in.Endpoint
 	out.Timeout = (*metav1.Duration)(unsafe.Pointer(in.Timeout))
 	return nil
@@ -192,7 +192,7 @@ func Convert_v1_KMSConfiguration_To_config_KMSConfiguration(in *KMSConfiguration
 
 func autoConvert_config_KMSConfiguration_To_v1_KMSConfiguration(in *config.KMSConfiguration, out *KMSConfiguration, s conversion.Scope) error {
 	out.Name = in.Name
-	out.CacheSize = in.CacheSize
+	out.CacheSize = (*int32)(unsafe.Pointer(in.CacheSize))
 	out.Endpoint = in.Endpoint
 	out.Timeout = (*metav1.Duration)(unsafe.Pointer(in.Timeout))
 	return nil
diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.deepcopy.go
index 9bd7732b..dcb4e855 100644
--- a/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.deepcopy.go
@@ -97,6 +97,11 @@ func (in *IdentityConfiguration) DeepCopy() *IdentityConfiguration {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *KMSConfiguration) DeepCopyInto(out *KMSConfiguration) {
 	*out = *in
+	if in.CacheSize != nil {
+		in, out := &in.CacheSize, &out.CacheSize
+		*out = new(int32)
+		**out = **in
+	}
 	if in.Timeout != nil {
 		in, out := &in.Timeout, &out.Timeout
 		*out = new(metav1.Duration)
diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.defaults.go b/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.defaults.go
index cce2e603..1c8db8d0 100644
--- a/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.defaults.go
+++ b/vendor/k8s.io/apiserver/pkg/apis/config/v1/zz_generated.defaults.go
@@ -28,5 +28,18 @@ import (
 // Public to allow building arbitrary schemes.
 // All generated defaulters are covering - they call all nested defaulters.
 func RegisterDefaults(scheme *runtime.Scheme) error {
+	scheme.AddTypeDefaultingFunc(&EncryptionConfiguration{}, func(obj interface{}) { SetObjectDefaults_EncryptionConfiguration(obj.(*EncryptionConfiguration)) })
 	return nil
 }
+
+func SetObjectDefaults_EncryptionConfiguration(in *EncryptionConfiguration) {
+	for i := range in.Resources {
+		a := &in.Resources[i]
+		for j := range a.Providers {
+			b := &a.Providers[j]
+			if b.KMS != nil {
+				SetDefaults_KMSConfiguration(b.KMS)
+			}
+		}
+	}
+}
diff --git a/vendor/k8s.io/apiserver/pkg/apis/config/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/config/zz_generated.deepcopy.go
index ce15176e..dd66315e 100644
--- a/vendor/k8s.io/apiserver/pkg/apis/config/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/apiserver/pkg/apis/config/zz_generated.deepcopy.go
@@ -97,6 +97,11 @@ func (in *IdentityConfiguration) DeepCopy() *IdentityConfiguration {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *KMSConfiguration) DeepCopyInto(out *KMSConfiguration) {
 	*out = *in
+	if in.CacheSize != nil {
+		in, out := &in.CacheSize, &out.CacheSize
+		*out = new(int32)
+		**out = **in
+	}
 	if in.Timeout != nil {
 		in, out := &in.Timeout, &out.Timeout
 		*out = new(v1.Duration)
diff --git a/vendor/k8s.io/cli-runtime/LICENSE b/vendor/k8s.io/cli-runtime/LICENSE
new file mode 100644
index 00000000..d6456956
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/builder_flags.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/builder_flags.go
new file mode 100644
index 00000000..f695fb5f
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/builder_flags.go
@@ -0,0 +1,220 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package genericclioptions
+
+import (
+	"github.com/spf13/pflag"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/cli-runtime/pkg/resource"
+)
+
+// ResourceBuilderFlags are flags for finding resources
+// TODO(juanvallejo): wire --local flag from commands through
+type ResourceBuilderFlags struct {
+	FileNameFlags *FileNameFlags
+
+	LabelSelector *string
+	FieldSelector *string
+	AllNamespaces *bool
+	All           *bool
+	Local         *bool
+
+	Scheme           *runtime.Scheme
+	Latest           bool
+	StopOnFirstError bool
+}
+
+// NewResourceBuilderFlags returns a default ResourceBuilderFlags
+func NewResourceBuilderFlags() *ResourceBuilderFlags {
+	filenames := []string{}
+
+	return &ResourceBuilderFlags{
+		FileNameFlags: &FileNameFlags{
+			Usage:     "identifying the resource.",
+			Filenames: &filenames,
+			Recursive: boolPtr(true),
+		},
+	}
+}
+
+func (o *ResourceBuilderFlags) WithFile(recurse bool, files ...string) *ResourceBuilderFlags {
+	o.FileNameFlags = &FileNameFlags{
+		Usage:     "identifying the resource.",
+		Filenames: &files,
+		Recursive: boolPtr(recurse),
+	}
+
+	return o
+}
+
+func (o *ResourceBuilderFlags) WithLabelSelector(selector string) *ResourceBuilderFlags {
+	o.LabelSelector = &selector
+	return o
+}
+
+func (o *ResourceBuilderFlags) WithFieldSelector(selector string) *ResourceBuilderFlags {
+	o.FieldSelector = &selector
+	return o
+}
+
+func (o *ResourceBuilderFlags) WithAllNamespaces(defaultVal bool) *ResourceBuilderFlags {
+	o.AllNamespaces = &defaultVal
+	return o
+}
+
+func (o *ResourceBuilderFlags) WithAll(defaultVal bool) *ResourceBuilderFlags {
+	o.All = &defaultVal
+	return o
+}
+
+func (o *ResourceBuilderFlags) WithLocal(defaultVal bool) *ResourceBuilderFlags {
+	o.Local = &defaultVal
+	return o
+}
+
+func (o *ResourceBuilderFlags) WithScheme(scheme *runtime.Scheme) *ResourceBuilderFlags {
+	o.Scheme = scheme
+	return o
+}
+
+func (o *ResourceBuilderFlags) WithLatest() *ResourceBuilderFlags {
+	o.Latest = true
+	return o
+}
+
+func (o *ResourceBuilderFlags) StopOnError() *ResourceBuilderFlags {
+	o.StopOnFirstError = true
+	return o
+}
+
+// AddFlags registers flags for finding resources
+func (o *ResourceBuilderFlags) AddFlags(flagset *pflag.FlagSet) {
+	o.FileNameFlags.AddFlags(flagset)
+
+	if o.LabelSelector != nil {
+		flagset.StringVarP(o.LabelSelector, "selector", "l", *o.LabelSelector, "Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)")
+	}
+	if o.FieldSelector != nil {
+		flagset.StringVar(o.FieldSelector, "field-selector", *o.FieldSelector, "Selector (field query) to filter on, supports '=', '==', and '!='.(e.g. --field-selector key1=value1,key2=value2). The server only supports a limited number of field queries per type.")
+	}
+	if o.AllNamespaces != nil {
+		flagset.BoolVarP(o.AllNamespaces, "all-namespaces", "A", *o.AllNamespaces, "If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace.")
+	}
+	if o.All != nil {
+		flagset.BoolVar(o.All, "all", *o.All, "Select all resources in the namespace of the specified resource types")
+	}
+	if o.Local != nil {
+		flagset.BoolVar(o.Local, "local", *o.Local, "If true, annotation will NOT contact api-server but run locally.")
+	}
+}
+
+// ToBuilder gives you back a resource finder to visit the resources it locates
+func (o *ResourceBuilderFlags) ToBuilder(restClientGetter RESTClientGetter, resources []string) ResourceFinder {
+	namespace, enforceNamespace, namespaceErr := restClientGetter.ToRawKubeConfigLoader().Namespace()
+
+	builder := resource.NewBuilder(restClientGetter).
+		NamespaceParam(namespace).DefaultNamespace()
+
+	if o.AllNamespaces != nil {
+		builder.AllNamespaces(*o.AllNamespaces)
+	}
+
+	if o.Scheme != nil {
+		builder.WithScheme(o.Scheme, o.Scheme.PrioritizedVersionsAllGroups()...)
+	} else {
+		builder.Unstructured()
+	}
+
+	if o.FileNameFlags != nil {
+		opts := o.FileNameFlags.ToOptions()
+		builder.FilenameParam(enforceNamespace, &opts)
+	}
+
+	if o.Local == nil || !*o.Local {
+		// resource type/name tuples only work non-local
+		if o.All != nil {
+			builder.ResourceTypeOrNameArgs(*o.All, resources...)
+		} else {
+			builder.ResourceTypeOrNameArgs(false, resources...)
+		}
+		// label selectors only work non-local (for now)
+		if o.LabelSelector != nil {
+			builder.LabelSelectorParam(*o.LabelSelector)
+		}
+		// field selectors only work non-local (forever)
+		if o.FieldSelector != nil {
+			builder.FieldSelectorParam(*o.FieldSelector)
+		}
+		// latest only works non-local (forever)
+		if o.Latest {
+			builder.Latest()
+		}
+
+	} else {
+		builder.Local()
+
+		if len(resources) > 0 {
+			builder.AddError(resource.LocalResourceError)
+		}
+	}
+
+	if !o.StopOnFirstError {
+		builder.ContinueOnError()
+	}
+
+	return &ResourceFindBuilderWrapper{
+		builder: builder.
+			Flatten(). // I think we're going to recommend this everywhere
+			AddError(namespaceErr),
+	}
+}
+
+// ResourceFindBuilderWrapper wraps a builder in an interface
+type ResourceFindBuilderWrapper struct {
+	builder *resource.Builder
+}
+
+// Do finds you resources to check
+func (b *ResourceFindBuilderWrapper) Do() resource.Visitor {
+	return b.builder.Do()
+}
+
+// ResourceFinder allows mocking the resource builder
+// TODO resource builders needs to become more interfacey
+type ResourceFinder interface {
+	Do() resource.Visitor
+}
+
+// ResourceFinderFunc is a handy way to make a ResourceFinder
+type ResourceFinderFunc func() resource.Visitor
+
+// Do implements ResourceFinder
+func (fn ResourceFinderFunc) Do() resource.Visitor {
+	return fn()
+}
+
+// ResourceFinderForResult wraps a visitor for re-use as a ResourceFinder
+func ResourceFinderForResult(result resource.Visitor) ResourceFinder {
+	return ResourceFinderFunc(func() resource.Visitor {
+		return result
+	})
+}
+
+func boolPtr(val bool) *bool {
+	return &val
+}
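For orientation, here is a minimal sketch of how the ResourceBuilderFlags and ResourceFinder types above are typically wired into a cobra command. The command name and wiring are illustrative assumptions, not part of the vendored file; only identifiers defined in this package and in k8s.io/cli-runtime/pkg/resource are used.

package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"

	"k8s.io/cli-runtime/pkg/genericclioptions"
	"k8s.io/cli-runtime/pkg/resource"
)

// newListCmd is an illustrative command that prints the names of whatever
// resources the builder flags select, e.g. `list deployments -l app=web`.
func newListCmd() *cobra.Command {
	configFlags := genericclioptions.NewConfigFlags(true) // satisfies RESTClientGetter
	builderFlags := genericclioptions.NewResourceBuilderFlags().
		WithLabelSelector("").
		WithAllNamespaces(false)

	cmd := &cobra.Command{
		Use: "list [resource types]",
		RunE: func(cmd *cobra.Command, args []string) error {
			finder := builderFlags.ToBuilder(configFlags, args)
			return finder.Do().Visit(func(info *resource.Info, err error) error {
				if err != nil {
					return err
				}
				fmt.Printf("%s/%s\n", info.Namespace, info.Name)
				return nil
			})
		},
	}
	configFlags.AddFlags(cmd.Flags())
	builderFlags.AddFlags(cmd.Flags())
	return cmd
}

func main() {
	if err := newListCmd().Execute(); err != nil {
		os.Exit(1)
	}
}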
diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/builder_flags_fake.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/builder_flags_fake.go
new file mode 100644
index 00000000..31038291
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/builder_flags_fake.go
@@ -0,0 +1,54 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package genericclioptions
+
+import (
+	"k8s.io/cli-runtime/pkg/resource"
+)
+
+// NewSimpleFakeResourceFinder builds a super simple ResourceFinder that just iterates over the objects you provided
+func NewSimpleFakeResourceFinder(infos ...*resource.Info) ResourceFinder {
+	return &fakeResourceFinder{
+		Infos: infos,
+	}
+}
+
+type fakeResourceFinder struct {
+	Infos []*resource.Info
+}
+
+// Do implements the interface
+func (f *fakeResourceFinder) Do() resource.Visitor {
+	return &fakeResourceResult{
+		Infos: f.Infos,
+	}
+}
+
+type fakeResourceResult struct {
+	Infos []*resource.Info
+}
+
+// Visit just iterates over info
+func (r *fakeResourceResult) Visit(fn resource.VisitorFunc) error {
+	for _, info := range r.Infos {
+		err := fn(info, nil)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/config_flags.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/config_flags.go
new file mode 100644
index 00000000..5ded3e98
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/config_flags.go
@@ -0,0 +1,382 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package genericclioptions
+
+import (
+	"errors"
+	"os"
+	"path/filepath"
+	"regexp"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/spf13/pflag"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/client-go/discovery"
+	diskcached "k8s.io/client-go/discovery/cached/disk"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/restmapper"
+	"k8s.io/client-go/tools/clientcmd"
+	"k8s.io/client-go/util/homedir"
+)
+
+const (
+	flagClusterName      = "cluster"
+	flagAuthInfoName     = "user"
+	flagContext          = "context"
+	flagNamespace        = "namespace"
+	flagAPIServer        = "server"
+	flagTLSServerName    = "tls-server-name"
+	flagInsecure         = "insecure-skip-tls-verify"
+	flagCertFile         = "client-certificate"
+	flagKeyFile          = "client-key"
+	flagCAFile           = "certificate-authority"
+	flagBearerToken      = "token"
+	flagImpersonate      = "as"
+	flagImpersonateGroup = "as-group"
+	flagUsername         = "username"
+	flagPassword         = "password"
+	flagTimeout          = "request-timeout"
+	flagHTTPCacheDir     = "cache-dir"
+)
+
+var (
+	defaultCacheDir = filepath.Join(homedir.HomeDir(), ".kube", "http-cache")
+
+	ErrEmptyConfig = errors.New(`Missing or incomplete configuration info.  Please point to an existing, complete config file:
+
+  1. Via the command-line flag --kubeconfig
+  2. Via the KUBECONFIG environment variable
+  3. In your home directory as ~/.kube/config
+
+To view or set up config directly, use the 'config' command.`)
+)
+
+// RESTClientGetter is an interface that ConfigFlags satisfies, giving commands an easier way to mock clients
+// and eliminating direct coupling to a struct type. Users may wish to duplicate this interface in their own
+// packages, relying on Go's implicit interface satisfaction (type overlapping).
+type RESTClientGetter interface {
+	// ToRESTConfig returns restconfig
+	ToRESTConfig() (*rest.Config, error)
+	// ToDiscoveryClient returns discovery client
+	ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error)
+	// ToRESTMapper returns a restmapper
+	ToRESTMapper() (meta.RESTMapper, error)
+	// ToRawKubeConfigLoader return kubeconfig loader as-is
+	ToRawKubeConfigLoader() clientcmd.ClientConfig
+}
+
+var _ RESTClientGetter = &ConfigFlags{}
+
+// ConfigFlags composes the set of values necessary
+// for obtaining a REST client config
+type ConfigFlags struct {
+	CacheDir   *string
+	KubeConfig *string
+
+	// config flags
+	ClusterName      *string
+	AuthInfoName     *string
+	Context          *string
+	Namespace        *string
+	APIServer        *string
+	TLSServerName    *string
+	Insecure         *bool
+	CertFile         *string
+	KeyFile          *string
+	CAFile           *string
+	BearerToken      *string
+	Impersonate      *string
+	ImpersonateGroup *[]string
+	Username         *string
+	Password         *string
+	Timeout          *string
+
+	clientConfig clientcmd.ClientConfig
+	lock         sync.Mutex
+	// If set to true, will use persistent client config and
+	// propagate the config to the places that need it, rather than
+	// loading the config multiple times
+	usePersistentConfig bool
+}
+
+// ToRESTConfig implements RESTClientGetter.
+// Returns a REST client configuration based on a provided path
+// to a .kubeconfig file, loading rules, and config flag overrides.
+// Expects the AddFlags method to have been called.
+func (f *ConfigFlags) ToRESTConfig() (*rest.Config, error) {
+	config, err := f.ToRawKubeConfigLoader().ClientConfig()
+	// replace client-go's ErrEmptyConfig error with our custom, more verbose version
+	if clientcmd.IsEmptyConfig(err) {
+		return nil, ErrEmptyConfig
+	}
+	return config, err
+}
+
+// ToRawKubeConfigLoader binds config flag values to config overrides
+// Returns an interactive clientConfig if the password flag is enabled,
+// or a non-interactive clientConfig otherwise.
+func (f *ConfigFlags) ToRawKubeConfigLoader() clientcmd.ClientConfig {
+	if f.usePersistentConfig {
+		return f.toRawKubePersistentConfigLoader()
+	}
+	return f.toRawKubeConfigLoader()
+}
+
+func (f *ConfigFlags) toRawKubeConfigLoader() clientcmd.ClientConfig {
+	loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
+	// use the standard defaults for this client command
+	// DEPRECATED: remove and replace with something more accurate
+	loadingRules.DefaultClientConfig = &clientcmd.DefaultClientConfig
+
+	if f.KubeConfig != nil {
+		loadingRules.ExplicitPath = *f.KubeConfig
+	}
+
+	overrides := &clientcmd.ConfigOverrides{ClusterDefaults: clientcmd.ClusterDefaults}
+
+	// bind auth info flag values to overrides
+	if f.CertFile != nil {
+		overrides.AuthInfo.ClientCertificate = *f.CertFile
+	}
+	if f.KeyFile != nil {
+		overrides.AuthInfo.ClientKey = *f.KeyFile
+	}
+	if f.BearerToken != nil {
+		overrides.AuthInfo.Token = *f.BearerToken
+	}
+	if f.Impersonate != nil {
+		overrides.AuthInfo.Impersonate = *f.Impersonate
+	}
+	if f.ImpersonateGroup != nil {
+		overrides.AuthInfo.ImpersonateGroups = *f.ImpersonateGroup
+	}
+	if f.Username != nil {
+		overrides.AuthInfo.Username = *f.Username
+	}
+	if f.Password != nil {
+		overrides.AuthInfo.Password = *f.Password
+	}
+
+	// bind cluster flags
+	if f.APIServer != nil {
+		overrides.ClusterInfo.Server = *f.APIServer
+	}
+	if f.TLSServerName != nil {
+		overrides.ClusterInfo.TLSServerName = *f.TLSServerName
+	}
+	if f.CAFile != nil {
+		overrides.ClusterInfo.CertificateAuthority = *f.CAFile
+	}
+	if f.Insecure != nil {
+		overrides.ClusterInfo.InsecureSkipTLSVerify = *f.Insecure
+	}
+
+	// bind context flags
+	if f.Context != nil {
+		overrides.CurrentContext = *f.Context
+	}
+	if f.ClusterName != nil {
+		overrides.Context.Cluster = *f.ClusterName
+	}
+	if f.AuthInfoName != nil {
+		overrides.Context.AuthInfo = *f.AuthInfoName
+	}
+	if f.Namespace != nil {
+		overrides.Context.Namespace = *f.Namespace
+	}
+
+	if f.Timeout != nil {
+		overrides.Timeout = *f.Timeout
+	}
+
+	var clientConfig clientcmd.ClientConfig
+
+	// we only have an interactive prompt when a password is allowed
+	if f.Password == nil {
+		clientConfig = clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, overrides)
+	} else {
+		clientConfig = clientcmd.NewInteractiveDeferredLoadingClientConfig(loadingRules, overrides, os.Stdin)
+	}
+
+	return clientConfig
+}
+
+// toRawKubePersistentConfigLoader binds config flag values to config overrides
+// Returns a persistent clientConfig for propagation.
+func (f *ConfigFlags) toRawKubePersistentConfigLoader() clientcmd.ClientConfig {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+
+	if f.clientConfig == nil {
+		f.clientConfig = f.toRawKubeConfigLoader()
+	}
+
+	return f.clientConfig
+}
+
+// ToDiscoveryClient implements RESTClientGetter.
+// Expects the AddFlags method to have been called.
+// Returns a CachedDiscoveryInterface using a computed RESTConfig.
+func (f *ConfigFlags) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) {
+	config, err := f.ToRESTConfig()
+	if err != nil {
+		return nil, err
+	}
+
+	// The more groups you have, the more discovery requests you need to make.
+	// Given 25 groups (our groups + a few custom resources) with one-ish version each, discovery needs to make 50 requests.
+	// Double it just so we don't end up here again for a while.  This config is only used for discovery.
+	config.Burst = 100
+
+	// retrieve a user-provided value for the "cache-dir"
+	// defaulting to ~/.kube/http-cache if no user-value is given.
+	httpCacheDir := defaultCacheDir
+	if f.CacheDir != nil {
+		httpCacheDir = *f.CacheDir
+	}
+
+	discoveryCacheDir := computeDiscoverCacheDir(filepath.Join(homedir.HomeDir(), ".kube", "cache", "discovery"), config.Host)
+	return diskcached.NewCachedDiscoveryClientForConfig(config, discoveryCacheDir, httpCacheDir, time.Duration(10*time.Minute))
+}
+
+// ToRESTMapper returns a mapper.
+func (f *ConfigFlags) ToRESTMapper() (meta.RESTMapper, error) {
+	discoveryClient, err := f.ToDiscoveryClient()
+	if err != nil {
+		return nil, err
+	}
+
+	mapper := restmapper.NewDeferredDiscoveryRESTMapper(discoveryClient)
+	expander := restmapper.NewShortcutExpander(mapper, discoveryClient)
+	return expander, nil
+}
+
+// AddFlags binds client configuration flags to a given flagset
+func (f *ConfigFlags) AddFlags(flags *pflag.FlagSet) {
+	if f.KubeConfig != nil {
+		flags.StringVar(f.KubeConfig, "kubeconfig", *f.KubeConfig, "Path to the kubeconfig file to use for CLI requests.")
+	}
+	if f.CacheDir != nil {
+		flags.StringVar(f.CacheDir, flagHTTPCacheDir, *f.CacheDir, "Default HTTP cache directory")
+	}
+
+	// add config options
+	if f.CertFile != nil {
+		flags.StringVar(f.CertFile, flagCertFile, *f.CertFile, "Path to a client certificate file for TLS")
+	}
+	if f.KeyFile != nil {
+		flags.StringVar(f.KeyFile, flagKeyFile, *f.KeyFile, "Path to a client key file for TLS")
+	}
+	if f.BearerToken != nil {
+		flags.StringVar(f.BearerToken, flagBearerToken, *f.BearerToken, "Bearer token for authentication to the API server")
+	}
+	if f.Impersonate != nil {
+		flags.StringVar(f.Impersonate, flagImpersonate, *f.Impersonate, "Username to impersonate for the operation")
+	}
+	if f.ImpersonateGroup != nil {
+		flags.StringArrayVar(f.ImpersonateGroup, flagImpersonateGroup, *f.ImpersonateGroup, "Group to impersonate for the operation, this flag can be repeated to specify multiple groups.")
+	}
+	if f.Username != nil {
+		flags.StringVar(f.Username, flagUsername, *f.Username, "Username for basic authentication to the API server")
+	}
+	if f.Password != nil {
+		flags.StringVar(f.Password, flagPassword, *f.Password, "Password for basic authentication to the API server")
+	}
+	if f.ClusterName != nil {
+		flags.StringVar(f.ClusterName, flagClusterName, *f.ClusterName, "The name of the kubeconfig cluster to use")
+	}
+	if f.AuthInfoName != nil {
+		flags.StringVar(f.AuthInfoName, flagAuthInfoName, *f.AuthInfoName, "The name of the kubeconfig user to use")
+	}
+	if f.Namespace != nil {
+		flags.StringVarP(f.Namespace, flagNamespace, "n", *f.Namespace, "If present, the namespace scope for this CLI request")
+	}
+	if f.Context != nil {
+		flags.StringVar(f.Context, flagContext, *f.Context, "The name of the kubeconfig context to use")
+	}
+
+	if f.APIServer != nil {
+		flags.StringVarP(f.APIServer, flagAPIServer, "s", *f.APIServer, "The address and port of the Kubernetes API server")
+	}
+	if f.TLSServerName != nil {
+		flags.StringVar(f.TLSServerName, flagTLSServerName, *f.TLSServerName, "Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used")
+	}
+	if f.Insecure != nil {
+		flags.BoolVar(f.Insecure, flagInsecure, *f.Insecure, "If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure")
+	}
+	if f.CAFile != nil {
+		flags.StringVar(f.CAFile, flagCAFile, *f.CAFile, "Path to a cert file for the certificate authority")
+	}
+	if f.Timeout != nil {
+		flags.StringVar(f.Timeout, flagTimeout, *f.Timeout, "The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests.")
+	}
+
+}
+
+// WithDeprecatedPasswordFlag enables the username and password config flags
+func (f *ConfigFlags) WithDeprecatedPasswordFlag() *ConfigFlags {
+	f.Username = stringptr("")
+	f.Password = stringptr("")
+	return f
+}
+
+// NewConfigFlags returns ConfigFlags with default values set
+func NewConfigFlags(usePersistentConfig bool) *ConfigFlags {
+	impersonateGroup := []string{}
+	insecure := false
+
+	return &ConfigFlags{
+		Insecure:   &insecure,
+		Timeout:    stringptr("0"),
+		KubeConfig: stringptr(""),
+
+		CacheDir:         stringptr(defaultCacheDir),
+		ClusterName:      stringptr(""),
+		AuthInfoName:     stringptr(""),
+		Context:          stringptr(""),
+		Namespace:        stringptr(""),
+		APIServer:        stringptr(""),
+		TLSServerName:    stringptr(""),
+		CertFile:         stringptr(""),
+		KeyFile:          stringptr(""),
+		CAFile:           stringptr(""),
+		BearerToken:      stringptr(""),
+		Impersonate:      stringptr(""),
+		ImpersonateGroup: &impersonateGroup,
+
+		usePersistentConfig: usePersistentConfig,
+	}
+}
+
+func stringptr(val string) *string {
+	return &val
+}
+
+// overlyCautiousIllegalFileCharacters matches characters that *might* not be supported.  Windows is really restrictive, so this is really restrictive
+var overlyCautiousIllegalFileCharacters = regexp.MustCompile(`[^(\w/\.)]`)
+
+// computeDiscoverCacheDir takes the parentDir and the host and comes up with a "usually non-colliding" name.
+func computeDiscoverCacheDir(parentDir, host string) string {
+	// strip the optional scheme from host if it's there:
+	schemelessHost := strings.Replace(strings.Replace(host, "https://", "", 1), "http://", "", 1)
+	// now do a simple collapse of non-AZ09 characters.  Collisions are possible but unlikely.  Even if we do collide the problem is short lived
+	safeHost := overlyCautiousIllegalFileCharacters.ReplaceAllString(schemelessHost, "_")
+	return filepath.Join(parentDir, safeHost)
+}
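A rough standalone sketch of resolving client configuration through ConfigFlags follows; the flag-set name and the parsed arguments are illustrative assumptions, while the API calls are the ones defined in this file.

package main

import (
	"fmt"

	"github.com/spf13/pflag"

	"k8s.io/cli-runtime/pkg/genericclioptions"
)

func main() {
	// Bind the kubeconfig-related flags to an ordinary pflag set.
	flags := pflag.NewFlagSet("example", pflag.ExitOnError)
	configFlags := genericclioptions.NewConfigFlags(true)
	configFlags.AddFlags(flags)
	_ = flags.Parse([]string{"--namespace", "kube-system"})

	// ToRESTConfig honors kubeconfig loading rules plus any flag overrides.
	restConfig, err := configFlags.ToRESTConfig()
	if err != nil {
		panic(err)
	}
	fmt.Println("API server:", restConfig.Host)

	// The raw loader exposes the effective namespace for the request.
	namespace, _, err := configFlags.ToRawKubeConfigLoader().Namespace()
	if err != nil {
		panic(err)
	}
	fmt.Println("namespace:", namespace)
}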
diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/config_flags_fake.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/config_flags_fake.go
new file mode 100644
index 00000000..64e9a688
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/config_flags_fake.go
@@ -0,0 +1,110 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package genericclioptions
+
+import (
+	"fmt"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/client-go/discovery"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/restmapper"
+	"k8s.io/client-go/tools/clientcmd"
+	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+)
+
+type TestConfigFlags struct {
+	clientConfig    clientcmd.ClientConfig
+	discoveryClient discovery.CachedDiscoveryInterface
+	restMapper      meta.RESTMapper
+}
+
+func (f *TestConfigFlags) ToRawKubeConfigLoader() clientcmd.ClientConfig {
+	if f.clientConfig == nil {
+		panic("attempt to obtain a test RawKubeConfigLoader with no clientConfig specified")
+	}
+	return f.clientConfig
+}
+
+func (f *TestConfigFlags) ToRESTConfig() (*rest.Config, error) {
+	return f.ToRawKubeConfigLoader().ClientConfig()
+}
+
+func (f *TestConfigFlags) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) {
+	return f.discoveryClient, nil
+}
+
+func (f *TestConfigFlags) ToRESTMapper() (meta.RESTMapper, error) {
+	if f.restMapper != nil {
+		return f.restMapper, nil
+	}
+	if f.discoveryClient != nil {
+		mapper := restmapper.NewDeferredDiscoveryRESTMapper(f.discoveryClient)
+		expander := restmapper.NewShortcutExpander(mapper, f.discoveryClient)
+		return expander, nil
+	}
+	return nil, fmt.Errorf("no restmapper")
+}
+
+func (f *TestConfigFlags) WithClientConfig(clientConfig clientcmd.ClientConfig) *TestConfigFlags {
+	f.clientConfig = clientConfig
+	return f
+}
+
+func (f *TestConfigFlags) WithRESTMapper(mapper meta.RESTMapper) *TestConfigFlags {
+	f.restMapper = mapper
+	return f
+}
+
+func (f *TestConfigFlags) WithDiscoveryClient(c discovery.CachedDiscoveryInterface) *TestConfigFlags {
+	f.discoveryClient = c
+	return f
+}
+
+func (f *TestConfigFlags) WithNamespace(ns string) *TestConfigFlags {
+	if f.clientConfig == nil {
+		panic("attempt to obtain a test RawKubeConfigLoader with no clientConfig specified")
+	}
+	f.clientConfig = &namespacedClientConfig{
+		delegate:  f.clientConfig,
+		namespace: ns,
+	}
+	return f
+}
+
+func NewTestConfigFlags() *TestConfigFlags {
+	return &TestConfigFlags{}
+}
+
+type namespacedClientConfig struct {
+	delegate  clientcmd.ClientConfig
+	namespace string
+}
+
+func (c *namespacedClientConfig) Namespace() (string, bool, error) {
+	return c.namespace, false, nil
+}
+
+func (c *namespacedClientConfig) RawConfig() (clientcmdapi.Config, error) {
+	return c.delegate.RawConfig()
+}
+func (c *namespacedClientConfig) ClientConfig() (*rest.Config, error) {
+	return c.delegate.ClientConfig()
+}
+func (c *namespacedClientConfig) ConfigAccess() clientcmd.ConfigAccess {
+	return c.delegate.ConfigAccess()
+}
diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/doc.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/doc.go
new file mode 100644
index 00000000..4796a8a4
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package genericclioptions contains flags which can be added to your command, bound, completed, and used to
+// produce useful helper functions.  Nothing in this package can depend on kube/kube
+package genericclioptions // import "k8s.io/cli-runtime/pkg/genericclioptions"
diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/filename_flags.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/filename_flags.go
new file mode 100644
index 00000000..09e7b5be
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/filename_flags.go
@@ -0,0 +1,79 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package genericclioptions
+
+import (
+	"strings"
+
+	"github.com/spf13/cobra"
+	"github.com/spf13/pflag"
+
+	"k8s.io/cli-runtime/pkg/resource"
+)
+
+// Usage of this struct by itself is discouraged.
+// These flags are composed by ResourceBuilderFlags
+// which should be used instead.
+type FileNameFlags struct {
+	Usage string
+
+	Filenames *[]string
+	Kustomize *string
+	Recursive *bool
+}
+
+func (o *FileNameFlags) ToOptions() resource.FilenameOptions {
+	options := resource.FilenameOptions{}
+
+	if o == nil {
+		return options
+	}
+
+	if o.Recursive != nil {
+		options.Recursive = *o.Recursive
+	}
+	if o.Filenames != nil {
+		options.Filenames = *o.Filenames
+	}
+	if o.Kustomize != nil {
+		options.Kustomize = *o.Kustomize
+	}
+
+	return options
+}
+
+func (o *FileNameFlags) AddFlags(flags *pflag.FlagSet) {
+	if o == nil {
+		return
+	}
+
+	if o.Recursive != nil {
+		flags.BoolVarP(o.Recursive, "recursive", "R", *o.Recursive, "Process the directory used in -f, --filename recursively. Useful when you want to manage related manifests organized within the same directory.")
+	}
+	if o.Filenames != nil {
+		flags.StringSliceVarP(o.Filenames, "filename", "f", *o.Filenames, o.Usage)
+		annotations := make([]string, 0, len(resource.FileExtensions))
+		for _, ext := range resource.FileExtensions {
+			annotations = append(annotations, strings.TrimLeft(ext, "."))
+		}
+		flags.SetAnnotation("filename", cobra.BashCompFilenameExt, annotations)
+	}
+	if o.Kustomize != nil {
+		flags.StringVarP(o.Kustomize, "kustomize", "k", *o.Kustomize,
+			"Process a kustomization directory. This flag can't be used together with -f or -R.")
+	}
+}
diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/io_options.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/io_options.go
new file mode 100644
index 00000000..4fc3a77b
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/io_options.go
@@ -0,0 +1,57 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package genericclioptions
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+)
+
+// IOStreams provides the standard names for iostreams.  This is useful for embedding and for unit testing.
+// Inconsistent and different names make it hard to read and review code
+type IOStreams struct {
+	// In is the input stream; think os.Stdin
+	In io.Reader
+	// Out is the output stream; think os.Stdout
+	Out io.Writer
+	// ErrOut is the error stream; think os.Stderr
+	ErrOut io.Writer
+}
+
+// NewTestIOStreams returns a valid IOStreams and in, out, errout buffers for unit tests
+func NewTestIOStreams() (IOStreams, *bytes.Buffer, *bytes.Buffer, *bytes.Buffer) {
+	in := &bytes.Buffer{}
+	out := &bytes.Buffer{}
+	errOut := &bytes.Buffer{}
+
+	return IOStreams{
+		In:     in,
+		Out:    out,
+		ErrOut: errOut,
+	}, in, out, errOut
+}
+
+// NewTestIOStreamsDiscard returns a valid IOStreams that just discards
+func NewTestIOStreamsDiscard() IOStreams {
+	in := &bytes.Buffer{}
+	return IOStreams{
+		In:     in,
+		Out:    ioutil.Discard,
+		ErrOut: ioutil.Discard,
+	}
+}
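A short sketch of the intended testing pattern for IOStreams; the "code under test" is stubbed inline and purely illustrative.

package example

import (
	"fmt"
	"strings"
	"testing"

	"k8s.io/cli-runtime/pkg/genericclioptions"
)

func TestCommandOutput(t *testing.T) {
	// NewTestIOStreams hands back the streams plus the backing buffers,
	// so assertions can be made on whatever the command wrote.
	streams, _, out, _ := genericclioptions.NewTestIOStreams()

	// Stand-in for the code under test, which would write to streams.Out
	// rather than os.Stdout.
	fmt.Fprintln(streams.Out, "hello")

	if !strings.Contains(out.String(), "hello") {
		t.Fatalf("expected output to contain %q, got %q", "hello", out.String())
	}
}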
diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/json_yaml_flags.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/json_yaml_flags.go
new file mode 100644
index 00000000..a344eb0b
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/json_yaml_flags.go
@@ -0,0 +1,68 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package genericclioptions
+
+import (
+	"strings"
+
+	"github.com/spf13/cobra"
+
+	"k8s.io/cli-runtime/pkg/printers"
+)
+
+func (f *JSONYamlPrintFlags) AllowedFormats() []string {
+	if f == nil {
+		return []string{}
+	}
+	return []string{"json", "yaml"}
+}
+
+// JSONYamlPrintFlags provides default flags necessary for json/yaml printing.
+// Given the following flag values, a printer can be requested that knows
+// how to handle printing based on these values.
+type JSONYamlPrintFlags struct {
+}
+
+// ToPrinter receives an outputFormat and returns a printer capable of
+// handling --output=(yaml|json) printing.
+// Returns a NoCompatiblePrinterError if the specified outputFormat does not match a supported format.
+// Supported format types can be found in pkg/printers/printers.go
+func (f *JSONYamlPrintFlags) ToPrinter(outputFormat string) (printers.ResourcePrinter, error) {
+	var printer printers.ResourcePrinter
+
+	outputFormat = strings.ToLower(outputFormat)
+	switch outputFormat {
+	case "json":
+		printer = &printers.JSONPrinter{}
+	case "yaml":
+		printer = &printers.YAMLPrinter{}
+	default:
+		return nil, NoCompatiblePrinterError{OutputFormat: &outputFormat, AllowedFormats: f.AllowedFormats()}
+	}
+
+	return printer, nil
+}
+
+// AddFlags receives a *cobra.Command reference and binds
+// flags related to JSON or Yaml printing to it
+func (f *JSONYamlPrintFlags) AddFlags(c *cobra.Command) {}
+
+// NewJSONYamlPrintFlags returns flags associated with
+// yaml or json printing, with default values set.
+func NewJSONYamlPrintFlags() *JSONYamlPrintFlags {
+	return &JSONYamlPrintFlags{}
+}
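A small sketch of driving JSONYamlPrintFlags directly; the Pod literal and the core/v1 import are assumptions used only for illustration, while the flag and printer calls are the ones defined above.

package main

import (
	"os"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"k8s.io/cli-runtime/pkg/genericclioptions"
)

func main() {
	// Ask for a YAML printer; "json" works the same way, and anything else
	// yields a NoCompatiblePrinterError.
	printer, err := genericclioptions.NewJSONYamlPrintFlags().ToPrinter("yaml")
	if err != nil {
		panic(err)
	}

	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "example"}}
	if err := printer.PrintObj(pod, os.Stdout); err != nil {
		panic(err)
	}
}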
diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/jsonpath_flags.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/jsonpath_flags.go
new file mode 100644
index 00000000..0ebd6a64
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/jsonpath_flags.go
@@ -0,0 +1,130 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package genericclioptions
+
+import (
+	"fmt"
+	"io/ioutil"
+	"sort"
+	"strings"
+
+	"github.com/spf13/cobra"
+
+	"k8s.io/cli-runtime/pkg/printers"
+)
+
+// templates are logically optional for specifying a format.
+// this allows a user to specify a template format value
+// as --output=jsonpath=
+var jsonFormats = map[string]bool{
+	"jsonpath":      true,
+	"jsonpath-file": true,
+}
+
+// JSONPathPrintFlags provides default flags necessary for template printing.
+// Given the following flag values, a printer can be requested that knows
+// how to handle printing based on these values.
+type JSONPathPrintFlags struct {
+	// indicates if it is OK to ignore missing keys for rendering
+	// an output template.
+	AllowMissingKeys *bool
+	TemplateArgument *string
+}
+
+func (f *JSONPathPrintFlags) AllowedFormats() []string {
+	formats := make([]string, 0, len(jsonFormats))
+	for format := range jsonFormats {
+		formats = append(formats, format)
+	}
+	sort.Strings(formats)
+	return formats
+}
+
+// ToPrinter receives a templateFormat and returns a printer capable of
+// handling --template format printing.
+// Returns a NoCompatiblePrinterError if the specified templateFormat does not match a supported template format.
+func (f *JSONPathPrintFlags) ToPrinter(templateFormat string) (printers.ResourcePrinter, error) {
+	if (f.TemplateArgument == nil || len(*f.TemplateArgument) == 0) && len(templateFormat) == 0 {
+		return nil, NoCompatiblePrinterError{Options: f, OutputFormat: &templateFormat}
+	}
+
+	templateValue := ""
+
+	if f.TemplateArgument == nil || len(*f.TemplateArgument) == 0 {
+		for format := range jsonFormats {
+			format = format + "="
+			if strings.HasPrefix(templateFormat, format) {
+				templateValue = templateFormat[len(format):]
+				templateFormat = format[:len(format)-1]
+				break
+			}
+		}
+	} else {
+		templateValue = *f.TemplateArgument
+	}
+
+	if _, supportedFormat := jsonFormats[templateFormat]; !supportedFormat {
+		return nil, NoCompatiblePrinterError{OutputFormat: &templateFormat, AllowedFormats: f.AllowedFormats()}
+	}
+
+	if len(templateValue) == 0 {
+		return nil, fmt.Errorf("template format specified but no template given")
+	}
+
+	if templateFormat == "jsonpath-file" {
+		data, err := ioutil.ReadFile(templateValue)
+		if err != nil {
+			return nil, fmt.Errorf("error reading --template %s, %v\n", templateValue, err)
+		}
+
+		templateValue = string(data)
+	}
+
+	p, err := printers.NewJSONPathPrinter(templateValue)
+	if err != nil {
+		return nil, fmt.Errorf("error parsing jsonpath %s, %v\n", templateValue, err)
+	}
+
+	allowMissingKeys := true
+	if f.AllowMissingKeys != nil {
+		allowMissingKeys = *f.AllowMissingKeys
+	}
+
+	p.AllowMissingKeys(allowMissingKeys)
+	return p, nil
+}
+
+// AddFlags receives a *cobra.Command reference and binds
+// flags related to template printing to it
+func (f *JSONPathPrintFlags) AddFlags(c *cobra.Command) {
+	if f.TemplateArgument != nil {
+		c.Flags().StringVar(f.TemplateArgument, "template", *f.TemplateArgument, "Template string or path to template file to use when --output=jsonpath, --output=jsonpath-file.")
+		c.MarkFlagFilename("template")
+	}
+	if f.AllowMissingKeys != nil {
+		c.Flags().BoolVar(f.AllowMissingKeys, "allow-missing-template-keys", *f.AllowMissingKeys, "If true, ignore any errors in templates when a field or map key is missing in the template. Only applies to golang and jsonpath output formats.")
+	}
+}
+
+// NewJSONPathPrintFlags returns flags associated with
+// --template printing, with default values set.
+func NewJSONPathPrintFlags(templateValue string, allowMissingKeys bool) *JSONPathPrintFlags {
+	return &JSONPathPrintFlags{
+		TemplateArgument: &templateValue,
+		AllowMissingKeys: &allowMissingKeys,
+	}
+}
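A comparable sketch for the JSONPath flavor, extracting a single field; as before, the Pod literal and core/v1 import are illustrative assumptions.

package main

import (
	"os"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"k8s.io/cli-runtime/pkg/genericclioptions"
)

func main() {
	// TemplateArgument carries the jsonpath expression; the "jsonpath-file"
	// format would instead read the expression from the named file.
	flags := genericclioptions.NewJSONPathPrintFlags("{.metadata.name}", true)
	printer, err := flags.ToPrinter("jsonpath")
	if err != nil {
		panic(err)
	}

	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "example"}}
	// Writes "example" (the pod name) to stdout.
	if err := printer.PrintObj(pod, os.Stdout); err != nil {
		panic(err)
	}
}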
diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/kube_template_flags.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/kube_template_flags.go
new file mode 100644
index 00000000..df2b58c1
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/kube_template_flags.go
@@ -0,0 +1,89 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package genericclioptions
+
+import (
+	"github.com/spf13/cobra"
+
+	"k8s.io/cli-runtime/pkg/printers"
+)
+
+// KubeTemplatePrintFlags composes print flags that provide both a JSONPath and a go-template printer.
+// This is necessary when dealing with cases that require support for both printers, since the two printers
+// require overlapping flags.
+type KubeTemplatePrintFlags struct {
+	GoTemplatePrintFlags *GoTemplatePrintFlags
+	JSONPathPrintFlags   *JSONPathPrintFlags
+
+	AllowMissingKeys *bool
+	TemplateArgument *string
+}
+
+func (f *KubeTemplatePrintFlags) AllowedFormats() []string {
+	if f == nil {
+		return []string{}
+	}
+	return append(f.GoTemplatePrintFlags.AllowedFormats(), f.JSONPathPrintFlags.AllowedFormats()...)
+}
+
+func (f *KubeTemplatePrintFlags) ToPrinter(outputFormat string) (printers.ResourcePrinter, error) {
+	if f == nil {
+		return nil, NoCompatiblePrinterError{}
+	}
+
+	if p, err := f.JSONPathPrintFlags.ToPrinter(outputFormat); !IsNoCompatiblePrinterError(err) {
+		return p, err
+	}
+	return f.GoTemplatePrintFlags.ToPrinter(outputFormat)
+}
+
+// AddFlags receives a *cobra.Command reference and binds
+// flags related to template printing to it
+func (f *KubeTemplatePrintFlags) AddFlags(c *cobra.Command) {
+	if f == nil {
+		return
+	}
+
+	if f.TemplateArgument != nil {
+		c.Flags().StringVar(f.TemplateArgument, "template", *f.TemplateArgument, "Template string or path to template file to use when -o=go-template, -o=go-template-file. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview].")
+		c.MarkFlagFilename("template")
+	}
+	if f.AllowMissingKeys != nil {
+		c.Flags().BoolVar(f.AllowMissingKeys, "allow-missing-template-keys", *f.AllowMissingKeys, "If true, ignore any errors in templates when a field or map key is missing in the template. Only applies to golang and jsonpath output formats.")
+	}
+}
+
+// NewKubeTemplatePrintFlags returns flags associated with
+// --template printing, with default values set.
+func NewKubeTemplatePrintFlags() *KubeTemplatePrintFlags {
+	allowMissingKeysPtr := true
+	templateArgPtr := ""
+
+	return &KubeTemplatePrintFlags{
+		GoTemplatePrintFlags: &GoTemplatePrintFlags{
+			TemplateArgument: &templateArgPtr,
+			AllowMissingKeys: &allowMissingKeysPtr,
+		},
+		JSONPathPrintFlags: &JSONPathPrintFlags{
+			TemplateArgument: &templateArgPtr,
+			AllowMissingKeys: &allowMissingKeysPtr,
+		},
+
+		TemplateArgument: &templateArgPtr,
+		AllowMissingKeys: &allowMissingKeysPtr,
+	}
+}
diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/name_flags.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/name_flags.go
new file mode 100644
index 00000000..7aa89ab0
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/name_flags.go
@@ -0,0 +1,81 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package genericclioptions
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/spf13/cobra"
+
+	"k8s.io/cli-runtime/pkg/printers"
+)
+
+// NamePrintFlags provides default flags necessary for printing
+// a resource's fully-qualified Kind.group/name, or a successful
+// message about that resource if an Operation is provided.
+type NamePrintFlags struct {
+	// Operation describes the name of the action that
+	// took place on an object, to be included in the
+	// finalized "successful" message.
+	Operation string
+}
+
+func (f *NamePrintFlags) Complete(successTemplate string) error {
+	f.Operation = fmt.Sprintf(successTemplate, f.Operation)
+	return nil
+}
+
+func (f *NamePrintFlags) AllowedFormats() []string {
+	if f == nil {
+		return []string{}
+	}
+	return []string{"name"}
+}
+
+// ToPrinter receives an outputFormat and returns a printer capable of
+// handling --output=name printing.
+// Returns a NoCompatiblePrinterError if the specified outputFormat does not match a supported format.
+// Supported format types can be found in pkg/printers/printers.go
+func (f *NamePrintFlags) ToPrinter(outputFormat string) (printers.ResourcePrinter, error) {
+	namePrinter := &printers.NamePrinter{
+		Operation: f.Operation,
+	}
+
+	outputFormat = strings.ToLower(outputFormat)
+	switch outputFormat {
+	case "name":
+		namePrinter.ShortOutput = true
+		fallthrough
+	case "":
+		return namePrinter, nil
+	default:
+		return nil, NoCompatiblePrinterError{OutputFormat: &outputFormat, AllowedFormats: f.AllowedFormats()}
+	}
+}
+
+// AddFlags receives a *cobra.Command reference and binds
+// flags related to name printing to it
+func (f *NamePrintFlags) AddFlags(c *cobra.Command) {}
+
+// NewNamePrintFlags returns flags associated with
+// --name printing, with default values set.
+func NewNamePrintFlags(operation string) *NamePrintFlags {
+	return &NamePrintFlags{
+		Operation: operation,
+	}
+}
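
A minimal sketch (not part of the patch) of NamePrintFlags in isolation; the operation "created" and the Pod name "foo" are assumptions, and the expected short output relies on the NamePrinter lowercasing the kind:

package main

import (
	"fmt"
	"os"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/cli-runtime/pkg/genericclioptions"
)

func main() {
	flags := genericclioptions.NewNamePrintFlags("created")

	// "name" yields short output; "" would yield the operation message instead.
	printer, err := flags.ToPrinter("name")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}

	pod := &corev1.Pod{
		TypeMeta:   metav1.TypeMeta{APIVersion: "v1", Kind: "Pod"},
		ObjectMeta: metav1.ObjectMeta{Name: "foo"},
	}
	_ = printer.PrintObj(pod, os.Stdout) // expected: pod/foo
}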
diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/print_flags.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/print_flags.go
new file mode 100644
index 00000000..17b05c8c
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/print_flags.go
@@ -0,0 +1,158 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package genericclioptions
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+
+	"github.com/spf13/cobra"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/cli-runtime/pkg/printers"
+)
+
+type NoCompatiblePrinterError struct {
+	OutputFormat   *string
+	AllowedFormats []string
+	Options        interface{}
+}
+
+func (e NoCompatiblePrinterError) Error() string {
+	output := ""
+	if e.OutputFormat != nil {
+		output = *e.OutputFormat
+	}
+
+	sort.Strings(e.AllowedFormats)
+	return fmt.Sprintf("unable to match a printer suitable for the output format %q, allowed formats are: %s", output, strings.Join(e.AllowedFormats, ","))
+}
+
+// IsNoCompatiblePrinterError returns true if err is a NoCompatiblePrinterError.
+func IsNoCompatiblePrinterError(err error) bool {
+	if err == nil {
+		return false
+	}
+
+	_, ok := err.(NoCompatiblePrinterError)
+	return ok
+}
+
+// PrintFlags composes common printer flag structs
+// used across all commands, and provides a method
+// of retrieving a known printer based on flag values provided.
+type PrintFlags struct {
+	JSONYamlPrintFlags   *JSONYamlPrintFlags
+	NamePrintFlags       *NamePrintFlags
+	TemplatePrinterFlags *KubeTemplatePrintFlags
+
+	TypeSetterPrinter *printers.TypeSetterPrinter
+
+	OutputFormat *string
+
+	// OutputFlagSpecified indicates whether the user specifically requested a certain kind of output.
+	// Using this function allows a sophisticated caller to change the flag binding logic if they so desire.
+	OutputFlagSpecified func() bool
+}
+
+func (f *PrintFlags) Complete(successTemplate string) error {
+	return f.NamePrintFlags.Complete(successTemplate)
+}
+
+func (f *PrintFlags) AllowedFormats() []string {
+	ret := []string{}
+	ret = append(ret, f.JSONYamlPrintFlags.AllowedFormats()...)
+	ret = append(ret, f.NamePrintFlags.AllowedFormats()...)
+	ret = append(ret, f.TemplatePrinterFlags.AllowedFormats()...)
+	return ret
+}
+
+// ToPrinter resolves the requested --output format to a printer,
+// trying the JSON/YAML, name, and template printer flags in order.
+func (f *PrintFlags) ToPrinter() (printers.ResourcePrinter, error) {
+	outputFormat := ""
+	if f.OutputFormat != nil {
+		outputFormat = *f.OutputFormat
+	}
+	// For backwards compatibility we want to support a --template argument given, even when no --output format is provided.
+	// If no explicit output format has been provided via the --output flag, fall back
+	// to honoring the --template argument.
+	templateFlagSpecified := f.TemplatePrinterFlags != nil &&
+		f.TemplatePrinterFlags.TemplateArgument != nil &&
+		len(*f.TemplatePrinterFlags.TemplateArgument) > 0
+	outputFlagSpecified := f.OutputFlagSpecified != nil && f.OutputFlagSpecified()
+	if templateFlagSpecified && !outputFlagSpecified {
+		outputFormat = "go-template"
+	}
+
+	if f.JSONYamlPrintFlags != nil {
+		if p, err := f.JSONYamlPrintFlags.ToPrinter(outputFormat); !IsNoCompatiblePrinterError(err) {
+			return f.TypeSetterPrinter.WrapToPrinter(p, err)
+		}
+	}
+
+	if f.NamePrintFlags != nil {
+		if p, err := f.NamePrintFlags.ToPrinter(outputFormat); !IsNoCompatiblePrinterError(err) {
+			return f.TypeSetterPrinter.WrapToPrinter(p, err)
+		}
+	}
+
+	if f.TemplatePrinterFlags != nil {
+		if p, err := f.TemplatePrinterFlags.ToPrinter(outputFormat); !IsNoCompatiblePrinterError(err) {
+			return f.TypeSetterPrinter.WrapToPrinter(p, err)
+		}
+	}
+
+	return nil, NoCompatiblePrinterError{OutputFormat: f.OutputFormat, AllowedFormats: f.AllowedFormats()}
+}
+
+func (f *PrintFlags) AddFlags(cmd *cobra.Command) {
+	f.JSONYamlPrintFlags.AddFlags(cmd)
+	f.NamePrintFlags.AddFlags(cmd)
+	f.TemplatePrinterFlags.AddFlags(cmd)
+
+	if f.OutputFormat != nil {
+		cmd.Flags().StringVarP(f.OutputFormat, "output", "o", *f.OutputFormat, fmt.Sprintf("Output format. One of: %s.", strings.Join(f.AllowedFormats(), "|")))
+		if f.OutputFlagSpecified == nil {
+			f.OutputFlagSpecified = func() bool {
+				return cmd.Flag("output").Changed
+			}
+		}
+	}
+}
+
+// WithDefaultOutput sets a default output format if one is not provided through a flag value
+func (f *PrintFlags) WithDefaultOutput(output string) *PrintFlags {
+	f.OutputFormat = &output
+	return f
+}
+
+// WithTypeSetter sets a wrapper that will surround the returned printer with a TypeSetterPrinter, which sets type information on the printed resources
+func (f *PrintFlags) WithTypeSetter(scheme *runtime.Scheme) *PrintFlags {
+	f.TypeSetterPrinter = printers.NewTypeSetter(scheme)
+	return f
+}
+
+func NewPrintFlags(operation string) *PrintFlags {
+	outputFormat := ""
+
+	return &PrintFlags{
+		OutputFormat: &outputFormat,
+
+		JSONYamlPrintFlags:   NewJSONYamlPrintFlags(),
+		NamePrintFlags:       NewNamePrintFlags(operation),
+		TemplatePrinterFlags: NewKubeTemplatePrintFlags(),
+	}
+}
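
A minimal sketch (assumed command name "demo") of how PrintFlags is typically wired into a cobra command so -o/--output resolves to a concrete printer:

package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
	"k8s.io/cli-runtime/pkg/genericclioptions"
	"k8s.io/client-go/kubernetes/scheme"
)

func main() {
	printFlags := genericclioptions.NewPrintFlags("created").WithTypeSetter(scheme.Scheme)

	cmd := &cobra.Command{
		Use: "demo",
		RunE: func(cmd *cobra.Command, args []string) error {
			// Resolves -o json|yaml|name|go-template|jsonpath|... to a printer;
			// with no -o at all, the name printer's operation message is used.
			printer, err := printFlags.ToPrinter()
			if err != nil {
				return err
			}
			fmt.Fprintf(os.Stdout, "using printer %T\n", printer)
			return nil
		},
	}
	printFlags.AddFlags(cmd) // binds -o/--output, --template, --allow-missing-template-keys
	if err := cmd.Execute(); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}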
diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/record_flags.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/record_flags.go
new file mode 100644
index 00000000..faf250d5
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/record_flags.go
@@ -0,0 +1,199 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package genericclioptions
+
+import (
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/evanphx/json-patch"
+	"github.com/spf13/cobra"
+	"github.com/spf13/pflag"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/json"
+)
+
+// ChangeCauseAnnotation is the annotation indicating a guess at "why" something was changed
+const ChangeCauseAnnotation = "kubernetes.io/change-cause"
+
+// RecordFlags contains all flags associated with the "--record" operation
+type RecordFlags struct {
+	// Record indicates the state of the recording flag.  It is a pointer so a caller can opt out or rebind
+	Record *bool
+
+	changeCause string
+}
+
+// ToRecorder returns a ChangeCauseRecorder if recording was requested
+// (--record=true); otherwise it returns a NoopRecorder.
+func (f *RecordFlags) ToRecorder() (Recorder, error) {
+	if f == nil {
+		return NoopRecorder{}, nil
+	}
+
+	shouldRecord := false
+	if f.Record != nil {
+		shouldRecord = *f.Record
+	}
+
+	// if flag was explicitly set to false by the user,
+	// do not record
+	if !shouldRecord {
+		return NoopRecorder{}, nil
+	}
+
+	return &ChangeCauseRecorder{
+		changeCause: f.changeCause,
+	}, nil
+}
+
+// Complete finishes the state of the struct before use; it is called after flags are parsed but before the command is run.
+func (f *RecordFlags) Complete(cmd *cobra.Command) error {
+	if f == nil {
+		return nil
+	}
+
+	f.changeCause = parseCommandArguments(cmd)
+	return nil
+}
+
+func (f *RecordFlags) CompleteWithChangeCause(cause string) error {
+	if f == nil {
+		return nil
+	}
+
+	f.changeCause = cause
+	return nil
+}
+
+// AddFlags binds the requested flags to the provided flagset
+// TODO have this only take a flagset
+func (f *RecordFlags) AddFlags(cmd *cobra.Command) {
+	if f == nil {
+		return
+	}
+
+	if f.Record != nil {
+		cmd.Flags().BoolVar(f.Record, "record", *f.Record, "Record current kubectl command in the resource annotation. If set to false, do not record the command. If set to true, record the command. If not set, default to updating the existing annotation value only if one already exists.")
+	}
+}
+
+// NewRecordFlags provides a RecordFlags with reasonable default values set for use
+func NewRecordFlags() *RecordFlags {
+	record := false
+
+	return &RecordFlags{
+		Record: &record,
+	}
+}
+
+// Recorder is used to record why a runtime.Object was changed in an annotation.
+type Recorder interface {
+	// Record records why a runtime.Object was changed in an annotation.
+	Record(runtime.Object) error
+	MakeRecordMergePatch(runtime.Object) ([]byte, error)
+}
+
+// NoopRecorder does nothing.  It can be returned in place of a real Recorder so callers don't have to switch on nil.
+type NoopRecorder struct{}
+
+// Record implements Recorder
+func (r NoopRecorder) Record(obj runtime.Object) error {
+	return nil
+}
+
+// MakeRecordMergePatch implements Recorder
+func (r NoopRecorder) MakeRecordMergePatch(obj runtime.Object) ([]byte, error) {
+	return nil, nil
+}
+
+// ChangeCauseRecorder annotates a "change-cause" to an input runtime object
+type ChangeCauseRecorder struct {
+	changeCause string
+}
+
+// Record annotates the given object with the "change-cause" annotation,
+// recording the command that caused the change.
+func (r *ChangeCauseRecorder) Record(obj runtime.Object) error {
+	accessor, err := meta.Accessor(obj)
+	if err != nil {
+		return err
+	}
+	annotations := accessor.GetAnnotations()
+	if annotations == nil {
+		annotations = make(map[string]string)
+	}
+	annotations[ChangeCauseAnnotation] = r.changeCause
+	accessor.SetAnnotations(annotations)
+	return nil
+}
+
+// MakeRecordMergePatch produces a merge patch for updating the recording annotation.
+func (r *ChangeCauseRecorder) MakeRecordMergePatch(obj runtime.Object) ([]byte, error) {
+	// copy so we don't mess with the original
+	objCopy := obj.DeepCopyObject()
+	if err := r.Record(objCopy); err != nil {
+		return nil, err
+	}
+
+	oldData, err := json.Marshal(obj)
+	if err != nil {
+		return nil, err
+	}
+	newData, err := json.Marshal(objCopy)
+	if err != nil {
+		return nil, err
+	}
+
+	return jsonpatch.CreateMergePatch(oldData, newData)
+}
+
+// parseCommandArguments stringifies and returns the invoking command line (binary name,
+// positional arguments, and flags) so it can be recorded as a change-cause.
+// Flag values annotated as "classified" are replaced with the literal CLASSIFIED.
+func parseCommandArguments(cmd *cobra.Command) string {
+	if len(os.Args) == 0 {
+		return ""
+	}
+
+	flags := ""
+	parseFunc := func(flag *pflag.Flag, value string) error {
+		flags = flags + " --" + flag.Name
+		if set, ok := flag.Annotations["classified"]; !ok || len(set) == 0 {
+			flags = flags + "=" + value
+		} else {
+			flags = flags + "=CLASSIFIED"
+		}
+		return nil
+	}
+	var err error
+	err = cmd.Flags().ParseAll(os.Args[1:], parseFunc)
+	if err != nil || !cmd.Flags().Parsed() {
+		return ""
+	}
+
+	args := ""
+	if arguments := cmd.Flags().Args(); len(arguments) > 0 {
+		args = " " + strings.Join(arguments, " ")
+	}
+
+	base := filepath.Base(os.Args[0])
+	return base + args + flags
+}
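
A minimal sketch of the record flow; the change-cause string and the use of CompleteWithChangeCause instead of Complete are assumptions made so the example is self-contained:

package main

import (
	"fmt"
	"os"

	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/cli-runtime/pkg/genericclioptions"
)

func main() {
	recordFlags := genericclioptions.NewRecordFlags()
	*recordFlags.Record = true // pretend the user passed --record

	// Complete would normally derive the change-cause from the cobra command;
	// CompleteWithChangeCause sets it directly (the string below is invented).
	_ = recordFlags.CompleteWithChangeCause("demo set image deploy/web web=nginx:1.19")

	recorder, err := recordFlags.ToRecorder()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}

	deploy := &appsv1.Deployment{}
	if err := recorder.Record(deploy); err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Println(deploy.Annotations[genericclioptions.ChangeCauseAnnotation])
}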
diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/template_flags.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/template_flags.go
new file mode 100644
index 00000000..34291279
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/template_flags.go
@@ -0,0 +1,135 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package genericclioptions
+
+import (
+	"fmt"
+	"io/ioutil"
+	"sort"
+	"strings"
+
+	"github.com/spf13/cobra"
+
+	"k8s.io/cli-runtime/pkg/printers"
+)
+
+// A template argument is logically optional when specifying a format:
+// this allows a user to carry the template value in the format itself,
+// as in --output=go-template=<template>.
+var templateFormats = map[string]bool{
+	"template":         true,
+	"go-template":      true,
+	"go-template-file": true,
+	"templatefile":     true,
+}
+
+// GoTemplatePrintFlags provides default flags necessary for template printing.
+// Given the following flag values, a printer can be requested that knows
+// how to handle printing based on these values.
+type GoTemplatePrintFlags struct {
+	// indicates if it is OK to ignore missing keys for rendering
+	// an output template.
+	AllowMissingKeys *bool
+	TemplateArgument *string
+}
+
+func (f *GoTemplatePrintFlags) AllowedFormats() []string {
+	formats := make([]string, 0, len(templateFormats))
+	for format := range templateFormats {
+		formats = append(formats, format)
+	}
+	sort.Strings(formats)
+	return formats
+}
+
+// ToPrinter receives a templateFormat and returns a printer capable of
+// handling --template format printing.
+// Returns a NoCompatiblePrinterError if the specified templateFormat does not match a template format.
+func (f *GoTemplatePrintFlags) ToPrinter(templateFormat string) (printers.ResourcePrinter, error) {
+	if (f.TemplateArgument == nil || len(*f.TemplateArgument) == 0) && len(templateFormat) == 0 {
+		return nil, NoCompatiblePrinterError{Options: f, OutputFormat: &templateFormat}
+	}
+
+	templateValue := ""
+
+	if f.TemplateArgument == nil || len(*f.TemplateArgument) == 0 {
+		for format := range templateFormats {
+			format = format + "="
+			if strings.HasPrefix(templateFormat, format) {
+				templateValue = templateFormat[len(format):]
+				templateFormat = format[:len(format)-1]
+				break
+			}
+		}
+	} else {
+		templateValue = *f.TemplateArgument
+	}
+
+	if _, supportedFormat := templateFormats[templateFormat]; !supportedFormat {
+		return nil, NoCompatiblePrinterError{OutputFormat: &templateFormat, AllowedFormats: f.AllowedFormats()}
+	}
+
+	if len(templateValue) == 0 {
+		return nil, fmt.Errorf("template format specified but no template given")
+	}
+
+	if templateFormat == "templatefile" || templateFormat == "go-template-file" {
+		data, err := ioutil.ReadFile(templateValue)
+		if err != nil {
+			return nil, fmt.Errorf("error reading --template %s, %v\n", templateValue, err)
+		}
+
+		templateValue = string(data)
+	}
+
+	p, err := printers.NewGoTemplatePrinter([]byte(templateValue))
+	if err != nil {
+		return nil, fmt.Errorf("error parsing template %s, %v\n", templateValue, err)
+	}
+
+	allowMissingKeys := true
+	if f.AllowMissingKeys != nil {
+		allowMissingKeys = *f.AllowMissingKeys
+	}
+
+	p.AllowMissingKeys(allowMissingKeys)
+	return p, nil
+}
+
+// AddFlags receives a *cobra.Command reference and binds
+// flags related to template printing to it
+func (f *GoTemplatePrintFlags) AddFlags(c *cobra.Command) {
+	if f.TemplateArgument != nil {
+		c.Flags().StringVar(f.TemplateArgument, "template", *f.TemplateArgument, "Template string or path to template file to use when -o=go-template, -o=go-template-file. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview].")
+		c.MarkFlagFilename("template")
+	}
+	if f.AllowMissingKeys != nil {
+		c.Flags().BoolVar(f.AllowMissingKeys, "allow-missing-template-keys", *f.AllowMissingKeys, "If true, ignore any errors in templates when a field or map key is missing in the template. Only applies to golang and jsonpath output formats.")
+	}
+}
+
+// NewGoTemplatePrintFlags returns flags associated with
+// --template printing, with default values set.
+func NewGoTemplatePrintFlags() *GoTemplatePrintFlags {
+	allowMissingKeysPtr := true
+	templateValuePtr := ""
+
+	return &GoTemplatePrintFlags{
+		TemplateArgument: &templateValuePtr,
+		AllowMissingKeys: &allowMissingKeysPtr,
+	}
+}
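
A minimal sketch showing the inline template form handled by ToPrinter; the ConfigMap name and the template are assumptions:

package main

import (
	"fmt"
	"os"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/cli-runtime/pkg/genericclioptions"
)

func main() {
	flags := genericclioptions.NewGoTemplatePrintFlags()

	// The template can be carried in the format itself, e.g. -o go-template={{.kind}}.
	printer, err := flags.ToPrinter("go-template={{.kind}}/{{.metadata.name}}")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}

	cm := &corev1.ConfigMap{
		TypeMeta:   metav1.TypeMeta{APIVersion: "v1", Kind: "ConfigMap"},
		ObjectMeta: metav1.ObjectMeta{Name: "demo"},
	}
	// Templates execute against the JSON form of the object, so this is
	// expected to print ConfigMap/demo.
	_ = printer.PrintObj(cm, os.Stdout)
}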
diff --git a/vendor/k8s.io/cli-runtime/pkg/kustomize/builder.go b/vendor/k8s.io/cli-runtime/pkg/kustomize/builder.go
new file mode 100644
index 00000000..6aace7ce
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/kustomize/builder.go
@@ -0,0 +1,32 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kustomize
+
+import (
+	"io"
+
+	"k8s.io/cli-runtime/pkg/kustomize/k8sdeps"
+	"sigs.k8s.io/kustomize/pkg/commands/build"
+	"sigs.k8s.io/kustomize/pkg/fs"
+)
+
+// RunKustomizeBuild runs kustomize build given a filesystem and a path
+func RunKustomizeBuild(out io.Writer, fSys fs.FileSystem, path string) error {
+	f := k8sdeps.NewFactory()
+	o := build.NewOptions(path, "")
+	return o.RunBuild(out, fSys, f.ResmapF, f.TransformerF)
+}
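
A minimal sketch of calling RunKustomizeBuild directly; the overlay path ./overlays/dev is an assumption:

package main

import (
	"fmt"
	"os"

	"k8s.io/cli-runtime/pkg/kustomize"
	"sigs.k8s.io/kustomize/pkg/fs"
)

func main() {
	// Render the kustomization found at ./overlays/dev to stdout.
	fSys := fs.MakeRealFS()
	if err := kustomize.RunKustomizeBuild(os.Stdout, fSys, "./overlays/dev"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}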
diff --git a/vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/configmapandsecret/configmapfactory.go b/vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/configmapandsecret/configmapfactory.go
new file mode 100644
index 00000000..9d40838a
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/configmapandsecret/configmapfactory.go
@@ -0,0 +1,125 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package configmapandsecret generates configmaps and secrets per generator rules.
+package configmapandsecret
+
+import (
+	"fmt"
+	"strings"
+	"unicode/utf8"
+
+	"github.com/pkg/errors"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/util/validation"
+	"k8s.io/cli-runtime/pkg/kustomize/k8sdeps/kv"
+	"sigs.k8s.io/kustomize/pkg/ifc"
+	"sigs.k8s.io/kustomize/pkg/types"
+)
+
+// ConfigMapFactory makes ConfigMaps.
+type ConfigMapFactory struct {
+	ldr ifc.Loader
+}
+
+// NewConfigMapFactory returns a new ConfigMapFactory.
+func NewConfigMapFactory(l ifc.Loader) *ConfigMapFactory {
+	return &ConfigMapFactory{ldr: l}
+}
+
+func (f *ConfigMapFactory) makeFreshConfigMap(
+	args *types.ConfigMapArgs) *corev1.ConfigMap {
+	cm := &corev1.ConfigMap{}
+	cm.APIVersion = "v1"
+	cm.Kind = "ConfigMap"
+	cm.Name = args.Name
+	cm.Namespace = args.Namespace
+	cm.Data = map[string]string{}
+	return cm
+}
+
+// MakeConfigMap returns a new ConfigMap, or nil and an error.
+func (f *ConfigMapFactory) MakeConfigMap(
+	args *types.ConfigMapArgs, options *types.GeneratorOptions) (*corev1.ConfigMap, error) {
+	var all []kv.Pair
+	var err error
+	cm := f.makeFreshConfigMap(args)
+
+	pairs, err := keyValuesFromEnvFile(f.ldr, args.EnvSource)
+	if err != nil {
+		return nil, errors.Wrap(err, fmt.Sprintf(
+			"env source file: %s",
+			args.EnvSource))
+	}
+	all = append(all, pairs...)
+
+	pairs, err = keyValuesFromLiteralSources(args.LiteralSources)
+	if err != nil {
+		return nil, errors.Wrap(err, fmt.Sprintf(
+			"literal sources %v", args.LiteralSources))
+	}
+	all = append(all, pairs...)
+
+	pairs, err = keyValuesFromFileSources(f.ldr, args.FileSources)
+	if err != nil {
+		return nil, errors.Wrap(err, fmt.Sprintf(
+			"file sources: %v", args.FileSources))
+	}
+	all = append(all, pairs...)
+
+	for _, p := range all {
+		err = addKvToConfigMap(cm, p.Key, p.Value)
+		if err != nil {
+			return nil, err
+		}
+	}
+	if options != nil {
+		cm.SetLabels(options.Labels)
+		cm.SetAnnotations(options.Annotations)
+	}
+	return cm, nil
+}
+
+// addKvToConfigMap adds the given key and data to the given config map.
+// Error if key invalid, or already exists.
+func addKvToConfigMap(configMap *corev1.ConfigMap, keyName, data string) error {
+	// Note, the rules for ConfigMap keys are exactly the same as the ones for Secret keys.
+	if errs := validation.IsConfigMapKey(keyName); len(errs) != 0 {
+		return fmt.Errorf("%q is not a valid key name for a ConfigMap: %s", keyName, strings.Join(errs, ";"))
+	}
+
+	keyExistsErrorMsg := "cannot add key %s, another key by that name already exists: %v"
+
+	// If the configmap data contains byte sequences that are all in the UTF-8
+	// range, we will write it to .Data
+	if utf8.Valid([]byte(data)) {
+		if _, entryExists := configMap.Data[keyName]; entryExists {
+			return fmt.Errorf(keyExistsErrorMsg, keyName, configMap.Data)
+		}
+		configMap.Data[keyName] = data
+		return nil
+	}
+
+	// otherwise, it's BinaryData
+	if configMap.BinaryData == nil {
+		configMap.BinaryData = map[string][]byte{}
+	}
+	if _, entryExists := configMap.BinaryData[keyName]; entryExists {
+		return fmt.Errorf(keyExistsErrorMsg, keyName, configMap.BinaryData)
+	}
+	configMap.BinaryData[keyName] = []byte(data)
+	return nil
+}
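
A minimal sketch of generating a ConfigMap from literal sources only, so a nil loader suffices (env and file sources would need a real ifc.Loader); the name and literals are assumptions. Secrets follow the same flow via the SecretFactory further below:

package main

import (
	"fmt"
	"os"

	"k8s.io/cli-runtime/pkg/kustomize/k8sdeps/configmapandsecret"
	"sigs.k8s.io/kustomize/pkg/types"
)

func main() {
	// Only literal sources are used, so the loader is never consulted.
	f := configmapandsecret.NewConfigMapFactory(nil)

	var args types.ConfigMapArgs
	args.Name = "app-config"
	args.LiteralSources = []string{"LOG_LEVEL=debug", "PORT=8080"}

	cm, err := f.MakeConfigMap(&args, nil)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Println(cm.Data["LOG_LEVEL"], cm.Data["PORT"]) // debug 8080
}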
diff --git a/vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/configmapandsecret/kv.go b/vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/configmapandsecret/kv.go
new file mode 100644
index 00000000..893dfefc
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/configmapandsecret/kv.go
@@ -0,0 +1,107 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package configmapandsecret
+
+import (
+	"fmt"
+	"path"
+	"strings"
+
+	"github.com/pkg/errors"
+	"k8s.io/cli-runtime/pkg/kustomize/k8sdeps/kv"
+	"sigs.k8s.io/kustomize/pkg/ifc"
+)
+
+func keyValuesFromLiteralSources(sources []string) ([]kv.Pair, error) {
+	var kvs []kv.Pair
+	for _, s := range sources {
+		k, v, err := parseLiteralSource(s)
+		if err != nil {
+			return nil, err
+		}
+		kvs = append(kvs, kv.Pair{Key: k, Value: v})
+	}
+	return kvs, nil
+}
+
+func keyValuesFromFileSources(ldr ifc.Loader, sources []string) ([]kv.Pair, error) {
+	var kvs []kv.Pair
+	for _, s := range sources {
+		k, fPath, err := parseFileSource(s)
+		if err != nil {
+			return nil, err
+		}
+		content, err := ldr.Load(fPath)
+		if err != nil {
+			return nil, err
+		}
+		kvs = append(kvs, kv.Pair{Key: k, Value: string(content)})
+	}
+	return kvs, nil
+}
+
+func keyValuesFromEnvFile(l ifc.Loader, path string) ([]kv.Pair, error) {
+	if path == "" {
+		return nil, nil
+	}
+	content, err := l.Load(path)
+	if err != nil {
+		return nil, err
+	}
+	return kv.KeyValuesFromLines(content)
+}
+
+// parseFileSource parses the source given.
+//
+//  Acceptable formats include:
+//   1.  source-path: the basename will become the key name
+//   2.  source-name=source-path: the source-name will become the key name and
+//       source-path is the path to the key file.
+//
+// Key names cannot include '='.
+func parseFileSource(source string) (keyName, filePath string, err error) {
+	numSeparators := strings.Count(source, "=")
+	switch {
+	case numSeparators == 0:
+		return path.Base(source), source, nil
+	case numSeparators == 1 && strings.HasPrefix(source, "="):
+		return "", "", fmt.Errorf("key name for file path %v missing", strings.TrimPrefix(source, "="))
+	case numSeparators == 1 && strings.HasSuffix(source, "="):
+		return "", "", fmt.Errorf("file path for key name %v missing", strings.TrimSuffix(source, "="))
+	case numSeparators > 1:
+		return "", "", errors.New("key names or file paths cannot contain '='")
+	default:
+		components := strings.Split(source, "=")
+		return components[0], components[1], nil
+	}
+}
+
+// parseLiteralSource parses the source key=val pair into its component pieces.
+// This functionality is distinguished from strings.SplitN(source, "=", 2) since
+// it returns an error in the case of empty keys, values, or a missing equals sign.
+func parseLiteralSource(source string) (keyName, value string, err error) {
+	// leading equal is invalid
+	if strings.Index(source, "=") == 0 {
+		return "", "", fmt.Errorf("invalid literal source %v, expected key=value", source)
+	}
+	// split after the first equal (so values can have the = character)
+	items := strings.SplitN(source, "=", 2)
+	if len(items) != 2 {
+		return "", "", fmt.Errorf("invalid literal source %v, expected key=value", source)
+	}
+	return items[0], strings.Trim(items[1], "\"'"), nil
+}
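
A sketch of the parsing rules above, written as an in-package test since the helpers are unexported; the sample sources are assumptions:

package configmapandsecret

import "testing"

// TestParseSourcesSketch illustrates the literal and file source formats.
func TestParseSourcesSketch(t *testing.T) {
	// Values may contain '=' and surrounding quotes are trimmed.
	if k, v, err := parseLiteralSource(`TOKEN="abc=123"`); err != nil || k != "TOKEN" || v != "abc=123" {
		t.Fatalf("literal: %q %q %v", k, v, err)
	}
	// A bare path uses its basename as the key.
	if k, p, err := parseFileSource("config.yaml"); err != nil || k != "config.yaml" || p != "config.yaml" {
		t.Fatalf("file (bare path): %q %q %v", k, p, err)
	}
	// key=path names the key explicitly.
	if k, p, err := parseFileSource("app=./conf/app.yaml"); err != nil || k != "app" || p != "./conf/app.yaml" {
		t.Fatalf("file (key=path): %q %q %v", k, p, err)
	}
}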
diff --git a/vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/configmapandsecret/secretfactory.go b/vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/configmapandsecret/secretfactory.go
new file mode 100644
index 00000000..97469f63
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/configmapandsecret/secretfactory.go
@@ -0,0 +1,106 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package configmapandsecret
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/pkg/errors"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/util/validation"
+	"k8s.io/cli-runtime/pkg/kustomize/k8sdeps/kv"
+	"sigs.k8s.io/kustomize/pkg/ifc"
+	"sigs.k8s.io/kustomize/pkg/types"
+)
+
+// SecretFactory makes Secrets.
+type SecretFactory struct {
+	ldr ifc.Loader
+}
+
+// NewSecretFactory returns a new SecretFactory.
+func NewSecretFactory(ldr ifc.Loader) *SecretFactory {
+	return &SecretFactory{ldr: ldr}
+}
+
+func (f *SecretFactory) makeFreshSecret(args *types.SecretArgs) *corev1.Secret {
+	s := &corev1.Secret{}
+	s.APIVersion = "v1"
+	s.Kind = "Secret"
+	s.Name = args.Name
+	s.Namespace = args.Namespace
+	s.Type = corev1.SecretType(args.Type)
+	if s.Type == "" {
+		s.Type = corev1.SecretTypeOpaque
+	}
+	s.Data = map[string][]byte{}
+	return s
+}
+
+// MakeSecret returns a new secret.
+func (f *SecretFactory) MakeSecret(args *types.SecretArgs, options *types.GeneratorOptions) (*corev1.Secret, error) {
+	var all []kv.Pair
+	var err error
+	s := f.makeFreshSecret(args)
+
+	pairs, err := keyValuesFromEnvFile(f.ldr, args.EnvSource)
+	if err != nil {
+		return nil, errors.Wrap(err, fmt.Sprintf(
+			"env source file: %s",
+			args.EnvSource))
+	}
+	all = append(all, pairs...)
+
+	pairs, err = keyValuesFromLiteralSources(args.LiteralSources)
+	if err != nil {
+		return nil, errors.Wrap(err, fmt.Sprintf(
+			"literal sources %v", args.LiteralSources))
+	}
+	all = append(all, pairs...)
+
+	pairs, err = keyValuesFromFileSources(f.ldr, args.FileSources)
+	if err != nil {
+		return nil, errors.Wrap(err, fmt.Sprintf(
+			"file sources: %v", args.FileSources))
+	}
+	all = append(all, pairs...)
+
+	for _, p := range all {
+		err = addKvToSecret(s, p.Key, p.Value)
+		if err != nil {
+			return nil, err
+		}
+	}
+	if options != nil {
+		s.SetLabels(options.Labels)
+		s.SetAnnotations(options.Annotations)
+	}
+	return s, nil
+}
+
+func addKvToSecret(secret *corev1.Secret, keyName, data string) error {
+	// Note, the rules for Secret keys are exactly the same as the ones for ConfigMap keys.
+	if errs := validation.IsConfigMapKey(keyName); len(errs) != 0 {
+		return fmt.Errorf("%q is not a valid key name for a Secret: %s", keyName, strings.Join(errs, ";"))
+	}
+	if _, entryExists := secret.Data[keyName]; entryExists {
+		return fmt.Errorf("cannot add key %s, another key by that name already exists", keyName)
+	}
+	secret.Data[keyName] = []byte(data)
+	return nil
+}
diff --git a/vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/doc.go b/vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/doc.go
new file mode 100644
index 00000000..c98cb8d6
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/doc.go
@@ -0,0 +1,76 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// It's possible that kustomize's features will be vendored into
+// the kubernetes/kubernetes repo and made available to kubectl
+// commands, while at the same time the kustomize program will
+// continue to exist as an independent CLI.  Vendoring snapshots
+// would be taken just before a kubectl release.
+//
+// This creates a problem in that freestanding-kustomize depends on
+// (for example):
+//
+//   https://github.com/kubernetes/apimachinery/
+//       tree/master/pkg/util/yaml
+//
+// It vendors that package into
+//   sigs.k8s.io/kustomize/vendor/k8s.io/apimachinery/
+//
+// Whereas kubectl-kustomize would have to depend on the "staging"
+// version of this code, located at
+//
+//   https://github.com/kubernetes/kubernetes/
+//       blob/master/staging/src/k8s.io/apimachinery/pkg/util/yaml
+//
+// which is "vendored" via symlinks:
+//   k8s.io/kubernetes/vendor/k8s.io/apimachinery
+// is a symlink to
+//   ../../staging/src/k8s.io/apimachinery
+//
+// The staging version is the canonical, under-development
+// version of the code that kubectl depends on, whereas the packages
+// at kubernetes/apimachinery are periodic snapshots of staging made
+// for outside tools to depend on.
+//
+// apimachinery isn't the only package that poses this problem, just
+// using it as a specific example.
+//
+// The kubectl binary cannot vendor in kustomize code that in
+// turn vendors in the non-staging packages.
+//
+// One way to fix some of this would be to copy code - a hard fork.
+// This has all the problems associated with a hard forking.
+//
+// Another way would be to break the kustomize repo into three:
+//
+// (1) kustomize - repo with the main() function,
+//     vendoring (2) and (3).
+//
+// (2) kustomize-libs - packages used by (1) with no
+//     apimachinery dependence.
+//
+// (3) kustomize-k8sdeps - A thin code layer that depends
+//     on (vendors) apimachinery to provide thin implementations
+//     to interfaces used in (2).
+//
+// The kubectl repo would then vendor from (2) only, and have
+// a local implementation of (3).  With that in mind, it's clear
+// that (3) doesn't have to be a repo; the kustomize version of
+// the thin layer can live directly in (1).
+//
+// This package is the code in (3), meant for kustomize.
+
+package k8sdeps
diff --git a/vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/factory.go b/vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/factory.go
new file mode 100644
index 00000000..a83b4bda
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/factory.go
@@ -0,0 +1,34 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package k8sdeps provides kustomize factory with k8s dependencies
+package k8sdeps
+
+import (
+	"k8s.io/cli-runtime/pkg/kustomize/k8sdeps/kunstruct"
+	"k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer"
+	"k8s.io/cli-runtime/pkg/kustomize/k8sdeps/validator"
+	"sigs.k8s.io/kustomize/pkg/factory"
+)
+
+// NewFactory creates an instance of KustFactory using k8sdeps factories
+func NewFactory() *factory.KustFactory {
+	return factory.NewKustFactory(
+		kunstruct.NewKunstructuredFactoryImpl(),
+		validator.NewKustValidator(),
+		transformer.NewFactoryImpl(),
+	)
+}
diff --git a/vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/kunstruct/factory.go b/vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/kunstruct/factory.go
new file mode 100644
index 00000000..a1dec316
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/kunstruct/factory.go
@@ -0,0 +1,118 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kunstruct
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/util/yaml"
+	"k8s.io/cli-runtime/pkg/kustomize/k8sdeps/configmapandsecret"
+	"sigs.k8s.io/kustomize/pkg/ifc"
+	"sigs.k8s.io/kustomize/pkg/types"
+)
+
+// KunstructuredFactoryImpl hides construction using apimachinery types.
+type KunstructuredFactoryImpl struct {
+	cmFactory     *configmapandsecret.ConfigMapFactory
+	secretFactory *configmapandsecret.SecretFactory
+}
+
+var _ ifc.KunstructuredFactory = &KunstructuredFactoryImpl{}
+
+// NewKunstructuredFactoryImpl returns a factory.
+func NewKunstructuredFactoryImpl() ifc.KunstructuredFactory {
+	return &KunstructuredFactoryImpl{}
+}
+
+// SliceFromBytes returns a slice of Kunstructured.
+func (kf *KunstructuredFactoryImpl) SliceFromBytes(
+	in []byte) ([]ifc.Kunstructured, error) {
+	decoder := yaml.NewYAMLOrJSONDecoder(bytes.NewReader(in), 1024)
+	var result []ifc.Kunstructured
+	var err error
+	for err == nil || isEmptyYamlError(err) {
+		var out unstructured.Unstructured
+		err = decoder.Decode(&out)
+		if err == nil {
+			if len(out.Object) == 0 {
+				continue
+			}
+			err = kf.validate(out)
+			if err != nil {
+				return nil, err
+			}
+			result = append(result, &UnstructAdapter{Unstructured: out})
+		}
+	}
+	if err != io.EOF {
+		return nil, err
+	}
+	return result, nil
+}
+
+func isEmptyYamlError(err error) bool {
+	return strings.Contains(err.Error(), "is missing in 'null'")
+}
+
+// FromMap returns an instance of Kunstructured.
+func (kf *KunstructuredFactoryImpl) FromMap(
+	m map[string]interface{}) ifc.Kunstructured {
+	return &UnstructAdapter{Unstructured: unstructured.Unstructured{Object: m}}
+}
+
+// MakeConfigMap returns an instance of Kunstructured for ConfigMap
+func (kf *KunstructuredFactoryImpl) MakeConfigMap(args *types.ConfigMapArgs, options *types.GeneratorOptions) (ifc.Kunstructured, error) {
+	cm, err := kf.cmFactory.MakeConfigMap(args, options)
+	if err != nil {
+		return nil, err
+	}
+	return NewKunstructuredFromObject(cm)
+}
+
+// MakeSecret returns an instance of Kunstructured for Secret
+func (kf *KunstructuredFactoryImpl) MakeSecret(args *types.SecretArgs, options *types.GeneratorOptions) (ifc.Kunstructured, error) {
+	sec, err := kf.secretFactory.MakeSecret(args, options)
+	if err != nil {
+		return nil, err
+	}
+	return NewKunstructuredFromObject(sec)
+}
+
+// Set sets loader
+func (kf *KunstructuredFactoryImpl) Set(ldr ifc.Loader) {
+	kf.cmFactory = configmapandsecret.NewConfigMapFactory(ldr)
+	kf.secretFactory = configmapandsecret.NewSecretFactory(ldr)
+}
+
+// validate validates that u has kind and name,
+// except for kinds ending in `List`, which don't require a name
+func (kf *KunstructuredFactoryImpl) validate(u unstructured.Unstructured) error {
+	kind := u.GetKind()
+	if kind == "" {
+		return fmt.Errorf("missing kind in object %v", u)
+	} else if strings.HasSuffix(kind, "List") {
+		return nil
+	}
+	if u.GetName() == "" {
+		return fmt.Errorf("missing metadata.name in object %v", u)
+	}
+	return nil
+}
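
A minimal sketch of decoding a multi-document manifest with SliceFromBytes; the manifest content is an assumption, and the example assumes the Kunstructured interface exposes the GetGvk and GetName accessors implemented by the adapter here:

package main

import (
	"fmt"
	"os"

	"k8s.io/cli-runtime/pkg/kustomize/k8sdeps/kunstruct"
)

func main() {
	manifest := []byte(`
apiVersion: v1
kind: ConfigMap
metadata:
  name: a
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: b
`)
	factory := kunstruct.NewKunstructuredFactoryImpl()
	objs, err := factory.SliceFromBytes(manifest)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	for _, o := range objs {
		fmt.Println(o.GetGvk().Kind, o.GetName()) // ConfigMap a, then ConfigMap b
	}
}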
diff --git a/vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/kunstruct/helper.go b/vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/kunstruct/helper.go
new file mode 100644
index 00000000..0675b961
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/kunstruct/helper.go
@@ -0,0 +1,71 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package kunstruct provides unstructured from api machinery and factory for creating unstructured
+package kunstruct
+
+import (
+	"fmt"
+	"strings"
+)
+
+func parseFields(path string) ([]string, error) {
+	if !strings.Contains(path, "[") {
+		return strings.Split(path, "."), nil
+	}
+
+	var fields []string
+	start := 0
+	insideParentheses := false
+	for i := range path {
+		switch path[i] {
+		case '.':
+			if !insideParentheses {
+				fields = append(fields, path[start:i])
+				start = i + 1
+			}
+		case '[':
+			if !insideParentheses {
+				if i == start {
+					start = i + 1
+				} else {
+					fields = append(fields, path[start:i])
+					start = i + 1
+				}
+				insideParentheses = true
+			} else {
+				return nil, fmt.Errorf("nested brackets are not allowed: %s", path)
+			}
+		case ']':
+			if insideParentheses {
+				fields = append(fields, path[start:i])
+				start = i + 1
+				insideParentheses = false
+			} else {
+				return nil, fmt.Errorf("invalid field path %s", path)
+			}
+		}
+	}
+	if start < len(path)-1 {
+		fields = append(fields, path[start:])
+	}
+	for i, f := range fields {
+		if strings.HasPrefix(f, "\"") || strings.HasPrefix(f, "'") {
+			fields[i] = strings.Trim(f, "\"'")
+		}
+	}
+	return fields, nil
+}
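
A sketch of how parseFields splits a bracketed field path, written as an in-package test since the helper is unexported; the sample path is an assumption:

package kunstruct

import (
	"reflect"
	"testing"
)

// TestParseFieldsSketch shows that dots inside brackets are not treated as separators.
func TestParseFieldsSketch(t *testing.T) {
	got, err := parseFields("metadata.annotations[kubernetes.io/change-cause]")
	if err != nil {
		t.Fatal(err)
	}
	want := []string{"metadata", "annotations", "kubernetes.io/change-cause"}
	if !reflect.DeepEqual(got, want) {
		t.Fatalf("got %v, want %v", got, want)
	}
}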
diff --git a/vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/kunstruct/kunstruct.go b/vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/kunstruct/kunstruct.go
new file mode 100644
index 00000000..5ad306bf
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/kunstruct/kunstruct.go
@@ -0,0 +1,92 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package kunstruct provides unstructured from api machinery and factory for creating unstructured
+package kunstruct
+
+import (
+	"encoding/json"
+	"fmt"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"sigs.k8s.io/kustomize/pkg/gvk"
+	"sigs.k8s.io/kustomize/pkg/ifc"
+)
+
+var _ ifc.Kunstructured = &UnstructAdapter{}
+
+// UnstructAdapter wraps unstructured.Unstructured from
+// https://github.com/kubernetes/apimachinery/blob/master/
+//     pkg/apis/meta/v1/unstructured/unstructured.go
+// to isolate dependence on apimachinery.
+type UnstructAdapter struct {
+	unstructured.Unstructured
+}
+
+// NewKunstructuredFromObject returns a new instance of Kunstructured.
+func NewKunstructuredFromObject(obj runtime.Object) (ifc.Kunstructured, error) {
+	// Convert obj to a byte stream, then convert that to JSON (Unstructured).
+	marshaled, err := json.Marshal(obj)
+	if err != nil {
+		return &UnstructAdapter{}, err
+	}
+	var u unstructured.Unstructured
+	err = u.UnmarshalJSON(marshaled)
+	// creationTimestamp always 'null', remove it
+	u.SetCreationTimestamp(metav1.Time{})
+	return &UnstructAdapter{Unstructured: u}, err
+}
+
+// GetGvk returns the Gvk name of the object.
+func (fs *UnstructAdapter) GetGvk() gvk.Gvk {
+	x := fs.GroupVersionKind()
+	return gvk.Gvk{
+		Group:   x.Group,
+		Version: x.Version,
+		Kind:    x.Kind,
+	}
+}
+
+// Copy provides a copy behind an interface.
+func (fs *UnstructAdapter) Copy() ifc.Kunstructured {
+	return &UnstructAdapter{*fs.DeepCopy()}
+}
+
+// Map returns the unstructured content map.
+func (fs *UnstructAdapter) Map() map[string]interface{} {
+	return fs.Object
+}
+
+// SetMap overrides the unstructured content map.
+func (fs *UnstructAdapter) SetMap(m map[string]interface{}) {
+	fs.Object = m
+}
+
+// GetFieldValue returns value at the given fieldpath.
+func (fs *UnstructAdapter) GetFieldValue(path string) (string, error) {
+	fields, err := parseFields(path)
+	if err != nil {
+		return "", err
+	}
+	s, found, err := unstructured.NestedString(
+		fs.UnstructuredContent(), fields...)
+	if found || err != nil {
+		return s, err
+	}
+	return "", fmt.Errorf("no field named '%s'", path)
+}
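
A minimal sketch of adapting a typed object and reading fields back through GetFieldValue; the ConfigMap name and namespace are assumptions:

package main

import (
	"fmt"
	"os"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/cli-runtime/pkg/kustomize/k8sdeps/kunstruct"
)

func main() {
	cm := &corev1.ConfigMap{
		TypeMeta:   metav1.TypeMeta{APIVersion: "v1", Kind: "ConfigMap"},
		ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"},
	}
	k, err := kunstruct.NewKunstructuredFromObject(cm)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	name, _ := k.GetFieldValue("metadata.name")
	ns, _ := k.GetFieldValue("metadata.namespace")
	fmt.Println(name, ns) // demo default
}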
diff --git a/vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/kv/kv.go b/vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/kv/kv.go
new file mode 100644
index 00000000..27b8b343
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/kv/kv.go
@@ -0,0 +1,102 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kv
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"os"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+
+	"k8s.io/apimachinery/pkg/util/validation"
+)
+
+// Pair represents a <key, value> pair.
+type Pair struct {
+	Key   string
+	Value string
+}
+
+var utf8bom = []byte{0xEF, 0xBB, 0xBF}
+
+// KeyValuesFromLines parses given content into a list of key-value pairs.
+func KeyValuesFromLines(content []byte) ([]Pair, error) {
+	var kvs []Pair
+
+	scanner := bufio.NewScanner(bytes.NewReader(content))
+	currentLine := 0
+	for scanner.Scan() {
+		// Process the current line, retrieving a key/value pair if
+		// possible.
+		scannedBytes := scanner.Bytes()
+		kv, err := KeyValuesFromLine(scannedBytes, currentLine)
+		if err != nil {
+			return nil, err
+		}
+		currentLine++
+
+		if len(kv.Key) == 0 {
+			// no key means line was empty or a comment
+			continue
+		}
+
+		kvs = append(kvs, kv)
+	}
+	return kvs, nil
+}
+
+// KeyValuesFromLine returns a kv with blank key if the line is empty or a comment.
+// The value will be retrieved from the environment if necessary.
+func KeyValuesFromLine(line []byte, currentLine int) (Pair, error) {
+	kv := Pair{}
+
+	if !utf8.Valid(line) {
+		return kv, fmt.Errorf("line %d has invalid utf8 bytes : %v", currentLine, string(line))
+	}
+
+	// We trim UTF8 BOM from the first line of the file but no others
+	if currentLine == 0 {
+		line = bytes.TrimPrefix(line, utf8bom)
+	}
+
+	// trim the line from all leading whitespace first
+	line = bytes.TrimLeftFunc(line, unicode.IsSpace)
+
+	// If the line is empty or a comment, we return a blank key/value pair.
+	if len(line) == 0 || line[0] == '#' {
+		return kv, nil
+	}
+
+	data := strings.SplitN(string(line), "=", 2)
+	key := data[0]
+	if errs := validation.IsEnvVarName(key); len(errs) != 0 {
+		return kv, fmt.Errorf("%q is not a valid key name: %s", key, strings.Join(errs, ";"))
+	}
+
+	if len(data) == 2 {
+		kv.Value = data[1]
+	} else {
+		// No value (no `=` in the line) is a signal to obtain the value
+		// from the environment.
+		kv.Value = os.Getenv(key)
+	}
+	kv.Key = key
+	return kv, nil
+}
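
A minimal sketch of KeyValuesFromLines, including a key with no '=' that is resolved from the environment; the sample env content is an assumption:

package main

import (
	"fmt"
	"os"

	"k8s.io/cli-runtime/pkg/kustomize/k8sdeps/kv"
)

func main() {
	env := []byte(`
# comment lines and blank lines are skipped
LOG_LEVEL=debug
HOME
`)
	os.Setenv("HOME", "/home/demo") // a key with no '=' is resolved from the environment
	pairs, err := kv.KeyValuesFromLines(env)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	for _, p := range pairs {
		fmt.Printf("%s=%s\n", p.Key, p.Value)
	}
}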
diff --git a/vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/factory.go b/vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/factory.go
new file mode 100644
index 00000000..bc435b37
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/factory.go
@@ -0,0 +1,43 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package transformer provides transformer factory
+package transformer
+
+import (
+	"k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/hash"
+	"k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/patch"
+	"sigs.k8s.io/kustomize/pkg/resource"
+	"sigs.k8s.io/kustomize/pkg/transformers"
+)
+
+// FactoryImpl makes patch transformer and name hash transformer
+type FactoryImpl struct{}
+
+// NewFactoryImpl makes a new factoryImpl instance
+func NewFactoryImpl() *FactoryImpl {
+	return &FactoryImpl{}
+}
+
+// MakePatchTransformer makes a new patch transformer
+func (p *FactoryImpl) MakePatchTransformer(slice []*resource.Resource, rf *resource.Factory) (transformers.Transformer, error) {
+	return patch.NewPatchTransformer(slice, rf)
+}
+
+// MakeHashTransformer makes a new name hash transformer
+func (p *FactoryImpl) MakeHashTransformer() transformers.Transformer {
+	return hash.NewNameHashTransformer()
+}
diff --git a/vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/hash/hash.go b/vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/hash/hash.go
new file mode 100644
index 00000000..85bf1e73
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/hash/hash.go
@@ -0,0 +1,184 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package hash
+
+import (
+	"crypto/sha256"
+	"encoding/json"
+	"fmt"
+
+	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+)
+
+// KustHash computes hashes for unstructured objects
+type KustHash struct{}
+
+// NewKustHash returns a KustHash object
+func NewKustHash() *KustHash {
+	return &KustHash{}
+}
+
+// Hash returns a hash of either a ConfigMap or a Secret
+func (h *KustHash) Hash(m map[string]interface{}) (string, error) {
+	u := unstructured.Unstructured{
+		Object: m,
+	}
+	kind := u.GetKind()
+	switch kind {
+	case "ConfigMap":
+		cm, err := unstructuredToConfigmap(u)
+		if err != nil {
+			return "", err
+		}
+		return ConfigMapHash(cm)
+	case "Secret":
+		sec, err := unstructuredToSecret(u)
+
+		if err != nil {
+			return "", err
+		}
+		return SecretHash(sec)
+	default:
+		return "", fmt.Errorf("type %s is supported for hashing in %v", kind, m)
+	}
+}
+
+// ConfigMapHash returns a hash of the ConfigMap.
+// The Data, Kind, and Name are taken into account.
+func ConfigMapHash(cm *v1.ConfigMap) (string, error) {
+	encoded, err := encodeConfigMap(cm)
+	if err != nil {
+		return "", err
+	}
+	h, err := encodeHash(hash(encoded))
+	if err != nil {
+		return "", err
+	}
+	return h, nil
+}
+
+// SecretHash returns a hash of the Secret.
+// The Data, Kind, Name, and Type are taken into account.
+func SecretHash(sec *v1.Secret) (string, error) {
+	encoded, err := encodeSecret(sec)
+	if err != nil {
+		return "", err
+	}
+	h, err := encodeHash(hash(encoded))
+	if err != nil {
+		return "", err
+	}
+	return h, nil
+}
+
+// encodeConfigMap encodes a ConfigMap.
+// Data, Kind, and Name are taken into account.
+func encodeConfigMap(cm *v1.ConfigMap) (string, error) {
+	// json.Marshal sorts the keys in a stable order in the encoding
+	m := map[string]interface{}{
+		"kind": "ConfigMap",
+		"name": cm.Name,
+		"data": cm.Data,
+	}
+	if cm.Immutable != nil {
+		m["immutable"] = *cm.Immutable
+	}
+	if len(cm.BinaryData) > 0 {
+		m["binaryData"] = cm.BinaryData
+	}
+	data, err := json.Marshal(m)
+	if err != nil {
+		return "", err
+	}
+	return string(data), nil
+}
+
+// encodeSecret encodes a Secret.
+// Data, Kind, Name, and Type are taken into account.
+func encodeSecret(sec *v1.Secret) (string, error) {
+	// json.Marshal sorts the keys in a stable order in the encoding
+	m := map[string]interface{}{
+		"kind": "Secret",
+		"type": sec.Type,
+		"name": sec.Name,
+		"data": sec.Data,
+	}
+	if sec.Immutable != nil {
+		m["immutable"] = *sec.Immutable
+	}
+	data, err := json.Marshal(m)
+	if err != nil {
+		return "", err
+	}
+	return string(data), nil
+}
+
+// encodeHash extracts the first 40 bits of the hash from the hex string
+// (1 hex char represents 4 bits), and then maps vowels and vowel-like hex
+// characters to consonants to prevent bad words from being formed (the theory
+// is that no vowels makes it really hard to make bad words). Since the string
+// is hex, the only vowels it can contain are 'a' and 'e'.
+// We picked some arbitrary consonants to map to from the same character set as GenerateName.
+// See: https://github.com/kubernetes/apimachinery/blob/dc1f89aff9a7509782bde3b68824c8043a3e58cc/pkg/util/rand/rand.go#L75
+// If the hex string contains fewer than ten characters, returns an error.
+func encodeHash(hex string) (string, error) {
+	if len(hex) < 10 {
+		return "", fmt.Errorf("the hex string must contain at least 10 characters")
+	}
+	enc := []rune(hex[:10])
+	for i := range enc {
+		switch enc[i] {
+		case '0':
+			enc[i] = 'g'
+		case '1':
+			enc[i] = 'h'
+		case '3':
+			enc[i] = 'k'
+		case 'a':
+			enc[i] = 'm'
+		case 'e':
+			enc[i] = 't'
+		}
+	}
+	return string(enc), nil
+}
+
+// hash hashes `data` with sha256 and returns the hex string
+func hash(data string) string {
+	return fmt.Sprintf("%x", sha256.Sum256([]byte(data)))
+}
+
+func unstructuredToConfigmap(u unstructured.Unstructured) (*v1.ConfigMap, error) {
+	marshaled, err := json.Marshal(u.Object)
+	if err != nil {
+		return nil, err
+	}
+	var out v1.ConfigMap
+	err = json.Unmarshal(marshaled, &out)
+	return &out, err
+}
+
+func unstructuredToSecret(u unstructured.Unstructured) (*v1.Secret, error) {
+	marshaled, err := json.Marshal(u.Object)
+	if err != nil {
+		return nil, err
+	}
+	var out v1.Secret
+	err = json.Unmarshal(marshaled, &out)
+	return &out, err
+}
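
A minimal sketch of ConfigMapHash, which is what the name hash transformer below appends as a name suffix; the ConfigMap name and data are assumptions:

package main

import (
	"fmt"
	"os"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/hash"
)

func main() {
	cm := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: "app-config"},
		Data:       map[string]string{"LOG_LEVEL": "debug"},
	}

	// The suffix is the first 10 hex chars of a sha256 over {kind, name, data},
	// remapped so the result contains no vowels.
	h, err := hash.ConfigMapHash(cm)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Printf("%s-%s\n", cm.Name, h)
}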
diff --git a/vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/hash/namehash.go b/vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/hash/namehash.go
new file mode 100644
index 00000000..a52072e8
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/hash/namehash.go
@@ -0,0 +1,47 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package hash
+
+import (
+	"fmt"
+
+	"sigs.k8s.io/kustomize/pkg/resmap"
+	"sigs.k8s.io/kustomize/pkg/transformers"
+)
+
+type nameHashTransformer struct{}
+
+var _ transformers.Transformer = &nameHashTransformer{}
+
+// NewNameHashTransformer constructs a nameHashTransformer.
+func NewNameHashTransformer() transformers.Transformer {
+	return &nameHashTransformer{}
+}
+
+// Transform appends a content hash suffix to the names of generated resources.
+func (o *nameHashTransformer) Transform(m resmap.ResMap) error {
+	for _, res := range m {
+		if res.NeedHashSuffix() {
+			h, err := NewKustHash().Hash(res.Map())
+			if err != nil {
+				return err
+			}
+			res.SetName(fmt.Sprintf("%s-%s", res.GetName(), h))
+		}
+	}
+	return nil
+}
diff --git a/vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/patch/patch.go b/vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/patch/patch.go
new file mode 100644
index 00000000..357f3dab
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/patch/patch.go
@@ -0,0 +1,174 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package patch
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/evanphx/json-patch"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/util/strategicpatch"
+	"k8s.io/client-go/kubernetes/scheme"
+	"sigs.k8s.io/kustomize/pkg/gvk"
+	"sigs.k8s.io/kustomize/pkg/resmap"
+	"sigs.k8s.io/kustomize/pkg/resource"
+	"sigs.k8s.io/kustomize/pkg/transformers"
+)
+
+// patchTransformer applies patches.
+type patchTransformer struct {
+	patches []*resource.Resource
+	rf      *resource.Factory
+}
+
+var _ transformers.Transformer = &patchTransformer{}
+
+// NewPatchTransformer constructs a patchTransformer.
+func NewPatchTransformer(
+	slice []*resource.Resource, rf *resource.Factory) (transformers.Transformer, error) {
+	if len(slice) == 0 {
+		return transformers.NewNoOpTransformer(), nil
+	}
+	return &patchTransformer{patches: slice, rf: rf}, nil
+}
+
+// Transform applies the patches on top of the base resources.
+func (pt *patchTransformer) Transform(baseResourceMap resmap.ResMap) error {
+	// Merge and then index the patches by Id.
+	patches, err := pt.mergePatches()
+	if err != nil {
+		return err
+	}
+
+	// Strategically merge the resources that exist in both base and patches.
+	for _, patch := range patches {
+		// Merge patches with base resource.
+		id := patch.Id()
+		matchedIds := baseResourceMap.GetMatchingIds(id.GvknEquals)
+		if len(matchedIds) == 0 {
+			return fmt.Errorf("failed to find an object with %s to apply the patch", id.GvknString())
+		}
+		if len(matchedIds) > 1 {
+			return fmt.Errorf("found multiple objects %#v targeted by patch %#v (ambiguous)", matchedIds, id)
+		}
+		id = matchedIds[0]
+		base := baseResourceMap[id]
+		merged := map[string]interface{}{}
+		versionedObj, err := scheme.Scheme.New(toSchemaGvk(id.Gvk()))
+		baseName := base.GetName()
+		switch {
+		case runtime.IsNotRegisteredError(err):
+			// Use JSON merge patch to handle types w/o schema
+			baseBytes, err := json.Marshal(base.Map())
+			if err != nil {
+				return err
+			}
+			patchBytes, err := json.Marshal(patch.Map())
+			if err != nil {
+				return err
+			}
+			mergedBytes, err := jsonpatch.MergePatch(baseBytes, patchBytes)
+			if err != nil {
+				return err
+			}
+			err = json.Unmarshal(mergedBytes, &merged)
+			if err != nil {
+				return err
+			}
+		case err != nil:
+			return err
+		default:
+			// Use Strategic-Merge-Patch to handle types w/ schema
+			// TODO: Change this to use the new Merge package.
+			// Store the name of the base object, because this name may have been munged.
+			// Apply this name to the patched object.
+			lookupPatchMeta, err := strategicpatch.NewPatchMetaFromStruct(versionedObj)
+			if err != nil {
+				return err
+			}
+			merged, err = strategicpatch.StrategicMergeMapPatchUsingLookupPatchMeta(
+				base.Map(),
+				patch.Map(),
+				lookupPatchMeta)
+			if err != nil {
+				return err
+			}
+		}
+		base.SetName(baseName)
+		baseResourceMap[id].SetMap(merged)
+	}
+	return nil
+}
+
+// mergePatches merges and indexes patches by Id.
+// It errors out if there is a conflict between patches.
+func (pt *patchTransformer) mergePatches() (resmap.ResMap, error) {
+	rc := resmap.ResMap{}
+	for ix, patch := range pt.patches {
+		id := patch.Id()
+		existing, found := rc[id]
+		if !found {
+			rc[id] = patch
+			continue
+		}
+
+		versionedObj, err := scheme.Scheme.New(toSchemaGvk(id.Gvk()))
+		if err != nil && !runtime.IsNotRegisteredError(err) {
+			return nil, err
+		}
+		var cd conflictDetector
+		if err != nil {
+			cd = newJMPConflictDetector(pt.rf)
+		} else {
+			cd, err = newSMPConflictDetector(versionedObj, pt.rf)
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		conflict, err := cd.hasConflict(existing, patch)
+		if err != nil {
+			return nil, err
+		}
+		if conflict {
+			conflictingPatch, err := cd.findConflict(ix, pt.patches)
+			if err != nil {
+				return nil, err
+			}
+			return nil, fmt.Errorf(
+				"conflict between %#v and %#v",
+				conflictingPatch.Map(), patch.Map())
+		}
+		merged, err := cd.mergePatches(existing, patch)
+		if err != nil {
+			return nil, err
+		}
+		rc[id] = merged
+	}
+	return rc, nil
+}
+
+// toSchemaGvk converts to a schema.GroupVersionKind.
+func toSchemaGvk(x gvk.Gvk) schema.GroupVersionKind {
+	return schema.GroupVersionKind{
+		Group:   x.Group,
+		Version: x.Version,
+		Kind:    x.Kind,
+	}
+}
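
The schema-less branch above falls back to RFC 7386 JSON merge patch via github.com/evanphx/json-patch. A minimal standalone sketch of that call, with hypothetical documents:

package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	base := []byte(`{"metadata":{"name":"web"},"spec":{"replicas":1,"paused":false}}`)
	patch := []byte(`{"spec":{"replicas":3}}`)

	// Fields present in the patch replace the corresponding fields in the base;
	// everything else is carried over unchanged.
	merged, err := jsonpatch.MergePatch(base, patch)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(merged))
}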
diff --git a/vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/patch/patchconflictdetector.go b/vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/patch/patchconflictdetector.go
new file mode 100644
index 00000000..10353c77
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/patch/patchconflictdetector.go
@@ -0,0 +1,137 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package patch
+
+import (
+	"encoding/json"
+
+	"github.com/evanphx/json-patch"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/mergepatch"
+	"k8s.io/apimachinery/pkg/util/strategicpatch"
+	"sigs.k8s.io/kustomize/pkg/resource"
+)
+
+type conflictDetector interface {
+	hasConflict(patch1, patch2 *resource.Resource) (bool, error)
+	findConflict(conflictingPatchIdx int, patches []*resource.Resource) (*resource.Resource, error)
+	mergePatches(patch1, patch2 *resource.Resource) (*resource.Resource, error)
+}
+
+type jsonMergePatch struct {
+	rf *resource.Factory
+}
+
+var _ conflictDetector = &jsonMergePatch{}
+
+func newJMPConflictDetector(rf *resource.Factory) conflictDetector {
+	return &jsonMergePatch{rf: rf}
+}
+
+func (jmp *jsonMergePatch) hasConflict(
+	patch1, patch2 *resource.Resource) (bool, error) {
+	return mergepatch.HasConflicts(patch1.Map(), patch2.Map())
+}
+
+func (jmp *jsonMergePatch) findConflict(
+	conflictingPatchIdx int, patches []*resource.Resource) (*resource.Resource, error) {
+	for i, patch := range patches {
+		if i == conflictingPatchIdx {
+			continue
+		}
+		if !patches[conflictingPatchIdx].Id().GvknEquals(patch.Id()) {
+			continue
+		}
+		conflict, err := mergepatch.HasConflicts(
+			patch.Map(),
+			patches[conflictingPatchIdx].Map())
+		if err != nil {
+			return nil, err
+		}
+		if conflict {
+			return patch, nil
+		}
+	}
+	return nil, nil
+}
+
+func (jmp *jsonMergePatch) mergePatches(
+	patch1, patch2 *resource.Resource) (*resource.Resource, error) {
+	baseBytes, err := json.Marshal(patch1.Map())
+	if err != nil {
+		return nil, err
+	}
+	patchBytes, err := json.Marshal(patch2.Map())
+	if err != nil {
+		return nil, err
+	}
+	mergedBytes, err := jsonpatch.MergeMergePatches(baseBytes, patchBytes)
+	if err != nil {
+		return nil, err
+	}
+	mergedMap := make(map[string]interface{})
+	err = json.Unmarshal(mergedBytes, &mergedMap)
+	return jmp.rf.FromMap(mergedMap), err
+}
+
+type strategicMergePatch struct {
+	lookupPatchMeta strategicpatch.LookupPatchMeta
+	rf              *resource.Factory
+}
+
+var _ conflictDetector = &strategicMergePatch{}
+
+func newSMPConflictDetector(
+	versionedObj runtime.Object,
+	rf *resource.Factory) (conflictDetector, error) {
+	lookupPatchMeta, err := strategicpatch.NewPatchMetaFromStruct(versionedObj)
+	return &strategicMergePatch{lookupPatchMeta: lookupPatchMeta, rf: rf}, err
+}
+
+func (smp *strategicMergePatch) hasConflict(p1, p2 *resource.Resource) (bool, error) {
+	return strategicpatch.MergingMapsHaveConflicts(
+		p1.Map(), p2.Map(), smp.lookupPatchMeta)
+}
+
+func (smp *strategicMergePatch) findConflict(
+	conflictingPatchIdx int, patches []*resource.Resource) (*resource.Resource, error) {
+	for i, patch := range patches {
+		if i == conflictingPatchIdx {
+			continue
+		}
+		if !patches[conflictingPatchIdx].Id().GvknEquals(patch.Id()) {
+			continue
+		}
+		conflict, err := strategicpatch.MergingMapsHaveConflicts(
+			patch.Map(),
+			patches[conflictingPatchIdx].Map(),
+			smp.lookupPatchMeta)
+		if err != nil {
+			return nil, err
+		}
+		if conflict {
+			return patch, nil
+		}
+	}
+	return nil, nil
+}
+
+func (smp *strategicMergePatch) mergePatches(patch1, patch2 *resource.Resource) (*resource.Resource, error) {
+	mergeJSONMap, err := strategicpatch.MergeStrategicMergeMapPatchUsingLookupPatchMeta(
+		smp.lookupPatchMeta, patch1.Map(), patch2.Map())
+	return smp.rf.FromMap(mergeJSONMap), err
+}
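
Both detectors boil down to the question "do two patches set the same field to different values?". A minimal standalone sketch of the JSON-merge-patch variant, using apimachinery's mergepatch helper on hypothetical patch maps:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/mergepatch"
)

func main() {
	p1 := map[string]interface{}{"spec": map[string]interface{}{"replicas": int64(2)}}
	p2 := map[string]interface{}{"spec": map[string]interface{}{"replicas": int64(3)}}
	p3 := map[string]interface{}{"metadata": map[string]interface{}{"labels": map[string]interface{}{"app": "web"}}}

	conflict, err := mergepatch.HasConflicts(p1, p2)
	if err != nil {
		panic(err)
	}
	fmt.Println(conflict) // true: both set spec.replicas, to different values

	conflict, err = mergepatch.HasConflicts(p1, p3)
	if err != nil {
		panic(err)
	}
	fmt.Println(conflict) // false: the patches touch disjoint fields
}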
diff --git a/vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/validator/validators.go b/vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/validator/validators.go
new file mode 100644
index 00000000..563e8d6b
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/kustomize/k8sdeps/validator/validators.go
@@ -0,0 +1,61 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package validator provides functions to validate labels, annotations, and namespaces using apimachinery.
+package validator
+
+import (
+	"errors"
+	apivalidation "k8s.io/apimachinery/pkg/api/validation"
+	v1validation "k8s.io/apimachinery/pkg/apis/meta/v1/validation"
+	"k8s.io/apimachinery/pkg/util/validation"
+	"k8s.io/apimachinery/pkg/util/validation/field"
+)
+
+// KustValidator validates labels and annotations using apimachinery.
+type KustValidator struct{}
+
+// NewKustValidator returns a KustValidator object
+func NewKustValidator() *KustValidator {
+	return &KustValidator{}
+}
+
+// MakeAnnotationValidator returns a MapValidatorFunc using apimachinery.
+func (v *KustValidator) MakeAnnotationValidator() func(map[string]string) error {
+	return func(x map[string]string) error {
+		errs := apivalidation.ValidateAnnotations(x, field.NewPath("field"))
+		if len(errs) > 0 {
+			return errors.New(errs.ToAggregate().Error())
+		}
+		return nil
+	}
+}
+
+// MakeLabelValidator returns a MapValidatorFunc using apimachinery.
+func (v *KustValidator) MakeLabelValidator() func(map[string]string) error {
+	return func(x map[string]string) error {
+		errs := v1validation.ValidateLabels(x, field.NewPath("field"))
+		if len(errs) > 0 {
+			return errors.New(errs.ToAggregate().Error())
+		}
+		return nil
+	}
+}
+
+// ValidateNamespace validates a string is a valid namespace using apimachinery.
+func (v *KustValidator) ValidateNamespace(s string) []string {
+	return validation.IsDNS1123Label(s)
+}
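
A minimal standalone sketch of calling these validators; the label and namespace values are hypothetical and the snippet is not part of the vendored sources.

package main

import (
	"fmt"

	"k8s.io/cli-runtime/pkg/kustomize/k8sdeps/validator"
)

func main() {
	v := validator.NewKustValidator()

	// Label validation: nil means the map is valid.
	check := v.MakeLabelValidator()
	fmt.Println(check(map[string]string{"app": "web"}))     // <nil>
	fmt.Println(check(map[string]string{"bad label": "x"})) // error: key contains a space

	// Namespace validation returns a list of violations; empty means valid.
	fmt.Println(v.ValidateNamespace("Not_A_DNS_Label"))
}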
diff --git a/vendor/k8s.io/cli-runtime/pkg/printers/discard.go b/vendor/k8s.io/cli-runtime/pkg/printers/discard.go
new file mode 100644
index 00000000..cd934976
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/printers/discard.go
@@ -0,0 +1,30 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package printers
+
+import (
+	"io"
+
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// NewDiscardingPrinter returns a printer that discards all objects.
+func NewDiscardingPrinter() ResourcePrinterFunc {
+	return ResourcePrinterFunc(func(runtime.Object, io.Writer) error {
+		return nil
+	})
+}
diff --git a/vendor/k8s.io/cli-runtime/pkg/printers/doc.go b/vendor/k8s.io/cli-runtime/pkg/printers/doc.go
new file mode 100644
index 00000000..ee205371
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/printers/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package printers provides helpers for formatting and printing runtime
+// objects to a primitive io.Writer.
+package printers // import "k8s.io/cli-runtime/pkg/printers"
diff --git a/vendor/k8s.io/cli-runtime/pkg/printers/interface.go b/vendor/k8s.io/cli-runtime/pkg/printers/interface.go
new file mode 100644
index 00000000..e06757f6
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/printers/interface.go
@@ -0,0 +1,54 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package printers
+
+import (
+	"io"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// ResourcePrinterFunc is a function that can print objects
+type ResourcePrinterFunc func(runtime.Object, io.Writer) error
+
+// PrintObj implements ResourcePrinter
+func (fn ResourcePrinterFunc) PrintObj(obj runtime.Object, w io.Writer) error {
+	return fn(obj, w)
+}
+
+// ResourcePrinter is an interface that knows how to print runtime objects.
+type ResourcePrinter interface {
+	// Print receives a runtime object, formats it and prints it to a writer.
+	PrintObj(runtime.Object, io.Writer) error
+}
+
+// PrintOptions defines the options used when printing resources.
+type PrintOptions struct {
+	NoHeaders     bool
+	WithNamespace bool
+	WithKind      bool
+	Wide          bool
+	ShowLabels    bool
+	Kind          schema.GroupKind
+	ColumnLabels  []string
+
+	SortBy string
+
+	// indicates if it is OK to ignore missing keys for rendering an output template.
+	AllowMissingKeys bool
+}
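
Because ResourcePrinterFunc adapts any function with this signature to the ResourcePrinter interface, a one-off printer can be written as a closure. A minimal standalone sketch with a hypothetical object:

package main

import (
	"fmt"
	"io"
	"os"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/cli-runtime/pkg/printers"
)

func main() {
	// A printer that only emits the object's kind.
	var p printers.ResourcePrinter = printers.ResourcePrinterFunc(
		func(obj runtime.Object, w io.Writer) error {
			_, err := fmt.Fprintln(w, obj.GetObjectKind().GroupVersionKind().Kind)
			return err
		})

	u := &unstructured.Unstructured{}
	u.SetGroupVersionKind(schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"})
	_ = p.PrintObj(u, os.Stdout) // prints "Deployment"
}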
diff --git a/vendor/k8s.io/cli-runtime/pkg/printers/json.go b/vendor/k8s.io/cli-runtime/pkg/printers/json.go
new file mode 100644
index 00000000..1c35b97d
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/printers/json.go
@@ -0,0 +1,142 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package printers
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"reflect"
+	"sync/atomic"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+
+	"sigs.k8s.io/yaml"
+)
+
+// JSONPrinter is an implementation of ResourcePrinter which outputs an object as JSON.
+type JSONPrinter struct{}
+
+// PrintObj is an implementation of ResourcePrinter.PrintObj which marshals the object to indented JSON and writes it to the Writer.
+func (p *JSONPrinter) PrintObj(obj runtime.Object, w io.Writer) error {
+	// we use reflect.Indirect here in order to obtain the actual value from a pointer.
+	// we need an actual value in order to retrieve the package path for an object.
+	// using reflect.Indirect indiscriminately is valid here, as all runtime.Objects are supposed to be pointers.
+	if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) {
+		return fmt.Errorf(InternalObjectPrinterErr)
+	}
+
+	switch obj := obj.(type) {
+	case *metav1.WatchEvent:
+		if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj.Object.Object)).Type().PkgPath()) {
+			return fmt.Errorf(InternalObjectPrinterErr)
+		}
+		data, err := json.Marshal(obj)
+		if err != nil {
+			return err
+		}
+		_, err = w.Write(data)
+		if err != nil {
+			return err
+		}
+		_, err = w.Write([]byte{'\n'})
+		return err
+	case *runtime.Unknown:
+		var buf bytes.Buffer
+		err := json.Indent(&buf, obj.Raw, "", "    ")
+		if err != nil {
+			return err
+		}
+		buf.WriteRune('\n')
+		_, err = buf.WriteTo(w)
+		return err
+	}
+
+	if obj.GetObjectKind().GroupVersionKind().Empty() {
+		return fmt.Errorf("missing apiVersion or kind; try GetObjectKind().SetGroupVersionKind() if you know the type")
+	}
+
+	data, err := json.MarshalIndent(obj, "", "    ")
+	if err != nil {
+		return err
+	}
+	data = append(data, '\n')
+	_, err = w.Write(data)
+	return err
+}
+
+// YAMLPrinter is an implementation of ResourcePrinter which outputs an object as YAML.
+// The input object is assumed to be in the internal version of an API and is converted
+// to the given version first.
+// If PrintObj() is called multiple times, objects are separated with a '---' separator.
+type YAMLPrinter struct {
+	printCount int64
+}
+
+// PrintObj prints the data as YAML.
+func (p *YAMLPrinter) PrintObj(obj runtime.Object, w io.Writer) error {
+	// we use reflect.Indirect here in order to obtain the actual value from a pointer.
+	// we need an actual value in order to retrieve the package path for an object.
+	// using reflect.Indirect indiscriminately is valid here, as all runtime.Objects are supposed to be pointers.
+	if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) {
+		return fmt.Errorf(InternalObjectPrinterErr)
+	}
+
+	count := atomic.AddInt64(&p.printCount, 1)
+	if count > 1 {
+		if _, err := w.Write([]byte("---\n")); err != nil {
+			return err
+		}
+	}
+
+	switch obj := obj.(type) {
+	case *metav1.WatchEvent:
+		if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj.Object.Object)).Type().PkgPath()) {
+			return fmt.Errorf(InternalObjectPrinterErr)
+		}
+		data, err := json.Marshal(obj)
+		if err != nil {
+			return err
+		}
+		data, err = yaml.JSONToYAML(data)
+		if err != nil {
+			return err
+		}
+		_, err = w.Write(data)
+		return err
+	case *runtime.Unknown:
+		data, err := yaml.JSONToYAML(obj.Raw)
+		if err != nil {
+			return err
+		}
+		_, err = w.Write(data)
+		return err
+	}
+
+	if obj.GetObjectKind().GroupVersionKind().Empty() {
+		return fmt.Errorf("missing apiVersion or kind; try GetObjectKind().SetGroupVersionKind() if you know the type")
+	}
+
+	output, err := yaml.Marshal(obj)
+	if err != nil {
+		return err
+	}
+	_, err = fmt.Fprint(w, string(output))
+	return err
+}
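
A minimal standalone sketch of both printers on an unstructured object with hypothetical ConfigMap data. PrintObj rejects objects whose apiVersion/kind are unset, so the GVK fields must be populated.

package main

import (
	"os"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/cli-runtime/pkg/printers"
)

func main() {
	u := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "ConfigMap",
		"metadata":   map[string]interface{}{"name": "example", "namespace": "default"},
		"data":       map[string]interface{}{"key": "value"},
	}}

	// Indented JSON, one object per call.
	_ = (&printers.JSONPrinter{}).PrintObj(u, os.Stdout)

	// YAML; a second call on the same printer is preceded by a "---" separator.
	y := &printers.YAMLPrinter{}
	_ = y.PrintObj(u, os.Stdout)
	_ = y.PrintObj(u, os.Stdout)
}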
diff --git a/vendor/k8s.io/cli-runtime/pkg/printers/jsonpath.go b/vendor/k8s.io/cli-runtime/pkg/printers/jsonpath.go
new file mode 100644
index 00000000..333b9c33
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/printers/jsonpath.go
@@ -0,0 +1,147 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package printers
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"reflect"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/client-go/util/jsonpath"
+)
+
+// exists returns true if it would be possible to call the index function
+// with these arguments.
+//
+// TODO: how to document this for users?
+//
+// index returns the result of indexing its first argument by the following
+// arguments.  Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each
+// indexed item must be a map, slice, or array.
+func exists(item interface{}, indices ...interface{}) bool {
+	v := reflect.ValueOf(item)
+	for _, i := range indices {
+		index := reflect.ValueOf(i)
+		var isNil bool
+		if v, isNil = indirect(v); isNil {
+			return false
+		}
+		switch v.Kind() {
+		case reflect.Array, reflect.Slice, reflect.String:
+			var x int64
+			switch index.Kind() {
+			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+				x = index.Int()
+			case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+				x = int64(index.Uint())
+			default:
+				return false
+			}
+			if x < 0 || x >= int64(v.Len()) {
+				return false
+			}
+			v = v.Index(int(x))
+		case reflect.Map:
+			if !index.IsValid() {
+				index = reflect.Zero(v.Type().Key())
+			}
+			if !index.Type().AssignableTo(v.Type().Key()) {
+				return false
+			}
+			if x := v.MapIndex(index); x.IsValid() {
+				v = x
+			} else {
+				v = reflect.Zero(v.Type().Elem())
+			}
+		default:
+			return false
+		}
+	}
+	if _, isNil := indirect(v); isNil {
+		return false
+	}
+	return true
+}
+
+// stolen from text/template
+// indirect returns the item at the end of indirection, and a bool to indicate if it's nil.
+// We indirect through pointers and empty interfaces (only) because
+// non-empty interfaces have methods we might need.
+func indirect(v reflect.Value) (rv reflect.Value, isNil bool) {
+	for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() {
+		if v.IsNil() {
+			return v, true
+		}
+		if v.Kind() == reflect.Interface && v.NumMethod() > 0 {
+			break
+		}
+	}
+	return v, false
+}
+
+// JSONPathPrinter is an implementation of ResourcePrinter which formats data with jsonpath expression.
+type JSONPathPrinter struct {
+	rawTemplate string
+	*jsonpath.JSONPath
+}
+
+func NewJSONPathPrinter(tmpl string) (*JSONPathPrinter, error) {
+	j := jsonpath.New("out")
+	if err := j.Parse(tmpl); err != nil {
+		return nil, err
+	}
+	return &JSONPathPrinter{
+		rawTemplate: tmpl,
+		JSONPath:    j,
+	}, nil
+}
+
+// PrintObj formats the obj with the JSONPath Template.
+func (j *JSONPathPrinter) PrintObj(obj runtime.Object, w io.Writer) error {
+	// we use reflect.Indirect here in order to obtain the actual value from a pointer.
+	// we need an actual value in order to retrieve the package path for an object.
+	// using reflect.Indirect indiscriminately is valid here, as all runtime.Objects are supposed to be pointers.
+	if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) {
+		return fmt.Errorf(InternalObjectPrinterErr)
+	}
+
+	var queryObj interface{} = obj
+	if unstructured, ok := obj.(runtime.Unstructured); ok {
+		queryObj = unstructured.UnstructuredContent()
+	} else {
+		data, err := json.Marshal(obj)
+		if err != nil {
+			return err
+		}
+		queryObj = map[string]interface{}{}
+		if err := json.Unmarshal(data, &queryObj); err != nil {
+			return err
+		}
+	}
+
+	if err := j.JSONPath.Execute(w, queryObj); err != nil {
+		buf := bytes.NewBuffer(nil)
+		fmt.Fprintf(buf, "Error executing template: %v. Printing more information for debugging the template:\n", err)
+		fmt.Fprintf(buf, "\ttemplate was:\n\t\t%v\n", j.rawTemplate)
+		fmt.Fprintf(buf, "\tobject given to jsonpath engine was:\n\t\t%#v\n\n", queryObj)
+		return fmt.Errorf("error executing jsonpath %q: %v\n", j.rawTemplate, buf.String())
+	}
+	return nil
+}
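
A minimal standalone sketch of constructing a JSONPathPrinter and extracting a single field from a hypothetical unstructured Pod:

package main

import (
	"fmt"
	"os"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/cli-runtime/pkg/printers"
)

func main() {
	p, err := printers.NewJSONPathPrinter("{.metadata.name}")
	if err != nil {
		panic(err)
	}

	u := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "Pod",
		"metadata":   map[string]interface{}{"name": "web-0"},
	}}

	// Unstructured content is handed to the jsonpath engine directly; prints "web-0".
	_ = p.PrintObj(u, os.Stdout)
	fmt.Println()
}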
diff --git a/vendor/k8s.io/cli-runtime/pkg/printers/name.go b/vendor/k8s.io/cli-runtime/pkg/printers/name.go
new file mode 100644
index 00000000..086166af
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/printers/name.go
@@ -0,0 +1,130 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package printers
+
+import (
+	"fmt"
+	"io"
+	"reflect"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// NamePrinter is an implementation of ResourcePrinter which outputs the "resource/name" pair of an object.
+type NamePrinter struct {
+	// ShortOutput indicates whether an operation should be
+	// printed alongside the "resource/name" pair for an object.
+	ShortOutput bool
+	// Operation describes the name of the action that
+	// took place on an object, to be included in the
+	// finalized "successful" message.
+	Operation string
+}
+
+// PrintObj is an implementation of ResourcePrinter.PrintObj which decodes the object
+// and prints the "resource/name" pair. If the object is a List, it prints all items in it.
+func (p *NamePrinter) PrintObj(obj runtime.Object, w io.Writer) error {
+	switch castObj := obj.(type) {
+	case *metav1.WatchEvent:
+		obj = castObj.Object.Object
+	}
+
+	// we use reflect.Indirect here in order to obtain the actual value from a pointer.
+	// using reflect.Indirect indiscriminately is valid here, as all runtime.Objects are supposed to be pointers.
+	// we need an actual value in order to retrieve the package path for an object.
+	if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) {
+		return fmt.Errorf(InternalObjectPrinterErr)
+	}
+
+	if meta.IsListType(obj) {
+		// we allow unstructured lists for now because they always contain the GVK information.  We should chase down
+		// callers and stop them from passing unflattened lists
+		// TODO chase the caller that is setting this and remove it.
+		if _, ok := obj.(*unstructured.UnstructuredList); !ok {
+			return fmt.Errorf("list types are not supported by name printing: %T", obj)
+		}
+
+		items, err := meta.ExtractList(obj)
+		if err != nil {
+			return err
+		}
+		for _, obj := range items {
+			if err := p.PrintObj(obj, w); err != nil {
+				return err
+			}
+		}
+		return nil
+	}
+
+	if obj.GetObjectKind().GroupVersionKind().Empty() {
+		return fmt.Errorf("missing apiVersion or kind; try GetObjectKind().SetGroupVersionKind() if you know the type")
+	}
+
+	name := "<unknown>"
+	if acc, err := meta.Accessor(obj); err == nil {
+		if n := acc.GetName(); len(n) > 0 {
+			name = n
+		}
+	}
+
+	return printObj(w, name, p.Operation, p.ShortOutput, GetObjectGroupKind(obj))
+}
+
+func GetObjectGroupKind(obj runtime.Object) schema.GroupKind {
+	if obj == nil {
+		return schema.GroupKind{Kind: "<unknown>"}
+	}
+	groupVersionKind := obj.GetObjectKind().GroupVersionKind()
+	if len(groupVersionKind.Kind) > 0 {
+		return groupVersionKind.GroupKind()
+	}
+
+	if uns, ok := obj.(*unstructured.Unstructured); ok {
+		if len(uns.GroupVersionKind().Kind) > 0 {
+			return uns.GroupVersionKind().GroupKind()
+		}
+	}
+
+	return schema.GroupKind{Kind: "<unknown>"}
+}
+
+func printObj(w io.Writer, name string, operation string, shortOutput bool, groupKind schema.GroupKind) error {
+	if len(groupKind.Kind) == 0 {
+		return fmt.Errorf("missing kind for resource with name %v", name)
+	}
+
+	if len(operation) > 0 {
+		operation = " " + operation
+	}
+
+	if shortOutput {
+		operation = ""
+	}
+
+	if len(groupKind.Group) == 0 {
+		fmt.Fprintf(w, "%s/%s%s\n", strings.ToLower(groupKind.Kind), name, operation)
+		return nil
+	}
+
+	fmt.Fprintf(w, "%s.%s/%s%s\n", strings.ToLower(groupKind.Kind), groupKind.Group, name, operation)
+	return nil
+}
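
A minimal standalone sketch of NamePrinter output for a hypothetical Deployment; the group is appended to the lower-cased kind because the object is not in the core API group.

package main

import (
	"os"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/cli-runtime/pkg/printers"
)

func main() {
	u := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "apps/v1",
		"kind":       "Deployment",
		"metadata":   map[string]interface{}{"name": "web"},
	}}

	// Prints "deployment.apps/web created".
	p := &printers.NamePrinter{Operation: "created"}
	_ = p.PrintObj(u, os.Stdout)
}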
diff --git a/vendor/k8s.io/cli-runtime/pkg/printers/sourcechecker.go b/vendor/k8s.io/cli-runtime/pkg/printers/sourcechecker.go
new file mode 100644
index 00000000..e360c8fe
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/printers/sourcechecker.go
@@ -0,0 +1,60 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package printers
+
+import (
+	"strings"
+)
+
+var (
+	InternalObjectPrinterErr = "a versioned object must be passed to a printer"
+
+	// disallowedPackagePrefixes contains regular expression templates
+	// for object package paths that are not allowed by printers.
+	disallowedPackagePrefixes = []string{
+		"k8s.io/kubernetes/pkg/apis/",
+	}
+)
+
+var InternalObjectPreventer = &illegalPackageSourceChecker{disallowedPackagePrefixes}
+
+func IsInternalObjectError(err error) bool {
+	if err == nil {
+		return false
+	}
+
+	return err.Error() == InternalObjectPrinterErr
+}
+
+// illegalPackageSourceChecker compares a given
+// object's package path, and determines if the
+// object originates from a disallowed source.
+type illegalPackageSourceChecker struct {
+	// disallowedPrefixes is a slice of disallowed package path
+	// prefixes for a given runtime.Object that we are printing.
+	disallowedPrefixes []string
+}
+
+func (c *illegalPackageSourceChecker) IsForbidden(pkgPath string) bool {
+	for _, forbiddenPrefix := range c.disallowedPrefixes {
+		if strings.HasPrefix(pkgPath, forbiddenPrefix) || strings.Contains(pkgPath, "/vendor/"+forbiddenPrefix) {
+			return true
+		}
+	}
+
+	return false
+}
diff --git a/vendor/k8s.io/cli-runtime/pkg/printers/tableprinter.go b/vendor/k8s.io/cli-runtime/pkg/printers/tableprinter.go
new file mode 100644
index 00000000..56bd05aa
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/printers/tableprinter.go
@@ -0,0 +1,573 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package printers
+
+import (
+	"fmt"
+	"io"
+	"reflect"
+	"strings"
+	"time"
+
+	"github.com/liggitt/tabwriter"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/duration"
+	"k8s.io/apimachinery/pkg/watch"
+)
+
+var _ ResourcePrinter = &HumanReadablePrinter{}
+
+type printHandler struct {
+	columnDefinitions []metav1.TableColumnDefinition
+	printFunc         reflect.Value
+}
+
+var (
+	statusHandlerEntry = &printHandler{
+		columnDefinitions: statusColumnDefinitions,
+		printFunc:         reflect.ValueOf(printStatus),
+	}
+
+	statusColumnDefinitions = []metav1.TableColumnDefinition{
+		{Name: "Status", Type: "string"},
+		{Name: "Reason", Type: "string"},
+		{Name: "Message", Type: "string"},
+	}
+
+	defaultHandlerEntry = &printHandler{
+		columnDefinitions: objectMetaColumnDefinitions,
+		printFunc:         reflect.ValueOf(printObjectMeta),
+	}
+
+	objectMetaColumnDefinitions = []metav1.TableColumnDefinition{
+		{Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]},
+		{Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]},
+	}
+
+	withEventTypePrefixColumns = []string{"EVENT"}
+	withNamespacePrefixColumns = []string{"NAMESPACE"} // TODO(erictune): print cluster name too.
+)
+
+// HumanReadablePrinter is an implementation of ResourcePrinter which attempts to provide
+// more elegant output. It is not threadsafe, but you may call PrintObj repeatedly; headers
+// will only be printed if the object type changes. This makes it useful for printing items
+// received from watches.
+type HumanReadablePrinter struct {
+	options        PrintOptions
+	lastType       interface{}
+	lastColumns    []metav1.TableColumnDefinition
+	printedHeaders bool
+}
+
+// NewTablePrinter creates a printer suitable for calling PrintObj().
+func NewTablePrinter(options PrintOptions) ResourcePrinter {
+	printer := &HumanReadablePrinter{
+		options: options,
+	}
+	return printer
+}
+
+func printHeader(columnNames []string, w io.Writer) error {
+	if _, err := fmt.Fprintf(w, "%s\n", strings.Join(columnNames, "\t")); err != nil {
+		return err
+	}
+	return nil
+}
+
+// PrintObj prints the obj in a human-friendly format according to the type of the obj.
+func (h *HumanReadablePrinter) PrintObj(obj runtime.Object, output io.Writer) error {
+
+	w, found := output.(*tabwriter.Writer)
+	if !found {
+		w = GetNewTabWriter(output)
+		output = w
+		defer w.Flush()
+	}
+
+	var eventType string
+	if event, isEvent := obj.(*metav1.WatchEvent); isEvent {
+		eventType = event.Type
+		obj = event.Object.Object
+	}
+
+	// Parameter "obj" is a table from server; print it.
+	// display tables following the rules of options
+	if table, ok := obj.(*metav1.Table); ok {
+		// Do not print headers if this table has no column definitions, or they are the same as the last ones we printed
+		localOptions := h.options
+		if h.printedHeaders && (len(table.ColumnDefinitions) == 0 || reflect.DeepEqual(table.ColumnDefinitions, h.lastColumns)) {
+			localOptions.NoHeaders = true
+		}
+
+		if len(table.ColumnDefinitions) == 0 {
+			// If this table has no column definitions, use the columns from the last table we printed for decoration and layout.
+			// This is done when receiving tables in watch events to save bandwidth.
+			table.ColumnDefinitions = h.lastColumns
+		} else if !reflect.DeepEqual(table.ColumnDefinitions, h.lastColumns) {
+			// If this table has column definitions, remember them for future use.
+			h.lastColumns = table.ColumnDefinitions
+			h.printedHeaders = false
+		}
+
+		if len(table.Rows) > 0 {
+			h.printedHeaders = true
+		}
+
+		if err := decorateTable(table, localOptions); err != nil {
+			return err
+		}
+		if len(eventType) > 0 {
+			if err := addColumns(beginning, table,
+				[]metav1.TableColumnDefinition{{Name: "Event", Type: "string"}},
+				[]cellValueFunc{func(metav1.TableRow) (interface{}, error) { return formatEventType(eventType), nil }},
+			); err != nil {
+				return err
+			}
+		}
+		return printTable(table, output, localOptions)
+	}
+
+	// Could not find print handler for "obj"; use the default or status print handler.
+	// Print with the default or status handler, emitting headers only when the handler type changes.
+	var handler *printHandler
+	if _, isStatus := obj.(*metav1.Status); isStatus {
+		handler = statusHandlerEntry
+	} else {
+		handler = defaultHandlerEntry
+	}
+
+	includeHeaders := h.lastType != handler && !h.options.NoHeaders
+
+	if h.lastType != nil && h.lastType != handler && !h.options.NoHeaders {
+		fmt.Fprintln(output)
+	}
+
+	if err := printRowsForHandlerEntry(output, handler, eventType, obj, h.options, includeHeaders); err != nil {
+		return err
+	}
+	h.lastType = handler
+
+	return nil
+}
+
+// printTable prints a table to the provided output respecting the filtering rules for options
+// for wide columns and filtered rows. It filters out rows that are Completed. You should call
+// decorateTable if you receive a table from a remote server before calling printTable.
+func printTable(table *metav1.Table, output io.Writer, options PrintOptions) error {
+	if !options.NoHeaders {
+		// avoid printing headers if we have no rows to display
+		if len(table.Rows) == 0 {
+			return nil
+		}
+
+		first := true
+		for _, column := range table.ColumnDefinitions {
+			if !options.Wide && column.Priority != 0 {
+				continue
+			}
+			if first {
+				first = false
+			} else {
+				fmt.Fprint(output, "\t")
+			}
+			fmt.Fprint(output, strings.ToUpper(column.Name))
+		}
+		fmt.Fprintln(output)
+	}
+	for _, row := range table.Rows {
+		first := true
+		for i, cell := range row.Cells {
+			if i >= len(table.ColumnDefinitions) {
+				// https://issue.k8s.io/66379
+				// don't panic in case of bad output from the server, with more cells than column definitions
+				break
+			}
+			column := table.ColumnDefinitions[i]
+			if !options.Wide && column.Priority != 0 {
+				continue
+			}
+			if first {
+				first = false
+			} else {
+				fmt.Fprint(output, "\t")
+			}
+			if cell != nil {
+				fmt.Fprint(output, cell)
+			}
+		}
+		fmt.Fprintln(output)
+	}
+	return nil
+}
+
+type cellValueFunc func(metav1.TableRow) (interface{}, error)
+
+type columnAddPosition int
+
+const (
+	beginning columnAddPosition = 1
+	end       columnAddPosition = 2
+)
+
+func addColumns(pos columnAddPosition, table *metav1.Table, columns []metav1.TableColumnDefinition, valueFuncs []cellValueFunc) error {
+	if len(columns) != len(valueFuncs) {
+		return fmt.Errorf("cannot prepend columns, unmatched value functions")
+	}
+	if len(columns) == 0 {
+		return nil
+	}
+
+	// Compute the new rows
+	newRows := make([][]interface{}, len(table.Rows))
+	for i := range table.Rows {
+		newCells := make([]interface{}, 0, len(columns)+len(table.Rows[i].Cells))
+
+		if pos == end {
+			// If we're appending, start with the existing cells,
+			// then add nil cells to match the number of columns
+			newCells = append(newCells, table.Rows[i].Cells...)
+			for len(newCells) < len(table.ColumnDefinitions) {
+				newCells = append(newCells, nil)
+			}
+		}
+
+		// Compute cells for new columns
+		for _, f := range valueFuncs {
+			newCell, err := f(table.Rows[i])
+			if err != nil {
+				return err
+			}
+			newCells = append(newCells, newCell)
+		}
+
+		if pos == beginning {
+			// If we're prepending, add existing cells
+			newCells = append(newCells, table.Rows[i].Cells...)
+		}
+
+		// Remember the new cells for this row
+		newRows[i] = newCells
+	}
+
+	// All cells successfully computed, now replace columns and rows
+	newColumns := make([]metav1.TableColumnDefinition, 0, len(columns)+len(table.ColumnDefinitions))
+	switch pos {
+	case beginning:
+		newColumns = append(newColumns, columns...)
+		newColumns = append(newColumns, table.ColumnDefinitions...)
+	case end:
+		newColumns = append(newColumns, table.ColumnDefinitions...)
+		newColumns = append(newColumns, columns...)
+	default:
+		return fmt.Errorf("invalid column add position: %v", pos)
+	}
+	table.ColumnDefinitions = newColumns
+	for i := range table.Rows {
+		table.Rows[i].Cells = newRows[i]
+	}
+
+	return nil
+}
+
+// decorateTable takes a table and attempts to add label columns and the
+// namespace column. It will fill empty columns with nil (if the object
+// does not expose metadata). It returns an error if the table cannot
+// be decorated.
+func decorateTable(table *metav1.Table, options PrintOptions) error {
+	width := len(table.ColumnDefinitions) + len(options.ColumnLabels)
+	if options.WithNamespace {
+		width++
+	}
+	if options.ShowLabels {
+		width++
+	}
+
+	columns := table.ColumnDefinitions
+
+	nameColumn := -1
+	if options.WithKind && !options.Kind.Empty() {
+		for i := range columns {
+			if columns[i].Format == "name" && columns[i].Type == "string" {
+				nameColumn = i
+				break
+			}
+		}
+	}
+
+	if width != len(table.ColumnDefinitions) {
+		columns = make([]metav1.TableColumnDefinition, 0, width)
+		if options.WithNamespace {
+			columns = append(columns, metav1.TableColumnDefinition{
+				Name: "Namespace",
+				Type: "string",
+			})
+		}
+		columns = append(columns, table.ColumnDefinitions...)
+		for _, label := range formatLabelHeaders(options.ColumnLabels) {
+			columns = append(columns, metav1.TableColumnDefinition{
+				Name: label,
+				Type: "string",
+			})
+		}
+		if options.ShowLabels {
+			columns = append(columns, metav1.TableColumnDefinition{
+				Name: "Labels",
+				Type: "string",
+			})
+		}
+	}
+
+	rows := table.Rows
+
+	includeLabels := len(options.ColumnLabels) > 0 || options.ShowLabels
+	if includeLabels || options.WithNamespace || nameColumn != -1 {
+		for i := range rows {
+			row := rows[i]
+
+			if nameColumn != -1 {
+				row.Cells[nameColumn] = fmt.Sprintf("%s/%s", strings.ToLower(options.Kind.String()), row.Cells[nameColumn])
+			}
+
+			var m metav1.Object
+			if obj := row.Object.Object; obj != nil {
+				if acc, err := meta.Accessor(obj); err == nil {
+					m = acc
+				}
+			}
+			// if we can't get an accessor, fill out the appropriate columns with empty spaces
+			if m == nil {
+				if options.WithNamespace {
+					r := make([]interface{}, 1, width)
+					row.Cells = append(r, row.Cells...)
+				}
+				for j := 0; j < width-len(row.Cells); j++ {
+					row.Cells = append(row.Cells, nil)
+				}
+				rows[i] = row
+				continue
+			}
+
+			if options.WithNamespace {
+				r := make([]interface{}, 1, width)
+				r[0] = m.GetNamespace()
+				row.Cells = append(r, row.Cells...)
+			}
+			if includeLabels {
+				row.Cells = appendLabelCells(row.Cells, m.GetLabels(), options)
+			}
+			rows[i] = row
+		}
+	}
+
+	table.ColumnDefinitions = columns
+	table.Rows = rows
+	return nil
+}
+
+// printRowsForHandlerEntry prints the incremental table output (headers if the current type is
+// different from lastType) including all the rows in the object. It returns an error,
+// if any.
+func printRowsForHandlerEntry(output io.Writer, handler *printHandler, eventType string, obj runtime.Object, options PrintOptions, includeHeaders bool) error {
+	var results []reflect.Value
+
+	args := []reflect.Value{reflect.ValueOf(obj), reflect.ValueOf(options)}
+	results = handler.printFunc.Call(args)
+	if !results[1].IsNil() {
+		return results[1].Interface().(error)
+	}
+
+	if includeHeaders {
+		var headers []string
+		for _, column := range handler.columnDefinitions {
+			if column.Priority != 0 && !options.Wide {
+				continue
+			}
+			headers = append(headers, strings.ToUpper(column.Name))
+		}
+		headers = append(headers, formatLabelHeaders(options.ColumnLabels)...)
+		// LABELS is always the last column.
+		headers = append(headers, formatShowLabelsHeader(options.ShowLabels)...)
+		// prepend namespace header
+		if options.WithNamespace {
+			headers = append(withNamespacePrefixColumns, headers...)
+		}
+		// prepend event type header
+		if len(eventType) > 0 {
+			headers = append(withEventTypePrefixColumns, headers...)
+		}
+		printHeader(headers, output)
+	}
+
+	if results[1].IsNil() {
+		rows := results[0].Interface().([]metav1.TableRow)
+		printRows(output, eventType, rows, options)
+		return nil
+	}
+	return results[1].Interface().(error)
+}
+
+var formattedEventType = map[string]string{
+	string(watch.Added):    "ADDED   ",
+	string(watch.Modified): "MODIFIED",
+	string(watch.Deleted):  "DELETED ",
+	string(watch.Error):    "ERROR   ",
+}
+
+func formatEventType(eventType string) string {
+	if formatted, ok := formattedEventType[eventType]; ok {
+		return formatted
+	}
+	return string(eventType)
+}
+
+// printRows writes the provided rows to output.
+func printRows(output io.Writer, eventType string, rows []metav1.TableRow, options PrintOptions) {
+	for _, row := range rows {
+		if len(eventType) > 0 {
+			fmt.Fprint(output, formatEventType(eventType))
+			fmt.Fprint(output, "\t")
+		}
+		if options.WithNamespace {
+			if obj := row.Object.Object; obj != nil {
+				if m, err := meta.Accessor(obj); err == nil {
+					fmt.Fprint(output, m.GetNamespace())
+				}
+			}
+			fmt.Fprint(output, "\t")
+		}
+
+		for i, cell := range row.Cells {
+			if i != 0 {
+				fmt.Fprint(output, "\t")
+			} else {
+				// TODO: remove this once we drop the legacy printers
+				if options.WithKind && !options.Kind.Empty() {
+					fmt.Fprintf(output, "%s/%s", strings.ToLower(options.Kind.String()), cell)
+					continue
+				}
+			}
+			fmt.Fprint(output, cell)
+		}
+
+		hasLabels := len(options.ColumnLabels) > 0
+		if obj := row.Object.Object; obj != nil && (hasLabels || options.ShowLabels) {
+			if m, err := meta.Accessor(obj); err == nil {
+				for _, value := range labelValues(m.GetLabels(), options) {
+					output.Write([]byte("\t"))
+					output.Write([]byte(value))
+				}
+			}
+		}
+
+		output.Write([]byte("\n"))
+	}
+}
+
+func formatLabelHeaders(columnLabels []string) []string {
+	formHead := make([]string, len(columnLabels))
+	for i, l := range columnLabels {
+		p := strings.Split(l, "/")
+		formHead[i] = strings.ToUpper((p[len(p)-1]))
+	}
+	return formHead
+}
+
+// headers for --show-labels=true
+func formatShowLabelsHeader(showLabels bool) []string {
+	if showLabels {
+		return []string{"LABELS"}
+	}
+	return nil
+}
+
+// labelValues returns a slice of value columns matching the requested print options.
+func labelValues(itemLabels map[string]string, opts PrintOptions) []string {
+	var values []string
+	for _, key := range opts.ColumnLabels {
+		values = append(values, itemLabels[key])
+	}
+	if opts.ShowLabels {
+		values = append(values, labels.FormatLabels(itemLabels))
+	}
+	return values
+}
+
+// appendLabelCells returns a slice of value columns matching the requested print options.
+// Intended for use with tables.
+func appendLabelCells(values []interface{}, itemLabels map[string]string, opts PrintOptions) []interface{} {
+	for _, key := range opts.ColumnLabels {
+		values = append(values, itemLabels[key])
+	}
+	if opts.ShowLabels {
+		values = append(values, labels.FormatLabels(itemLabels))
+	}
+	return values
+}
+
+func printStatus(obj runtime.Object, options PrintOptions) ([]metav1.TableRow, error) {
+	status, ok := obj.(*metav1.Status)
+	if !ok {
+		return nil, fmt.Errorf("expected *v1.Status, got %T", obj)
+	}
+	return []metav1.TableRow{{
+		Object: runtime.RawExtension{Object: obj},
+		Cells:  []interface{}{status.Status, status.Reason, status.Message},
+	}}, nil
+}
+
+func printObjectMeta(obj runtime.Object, options PrintOptions) ([]metav1.TableRow, error) {
+	if meta.IsListType(obj) {
+		rows := make([]metav1.TableRow, 0, 16)
+		err := meta.EachListItem(obj, func(obj runtime.Object) error {
+			nestedRows, err := printObjectMeta(obj, options)
+			if err != nil {
+				return err
+			}
+			rows = append(rows, nestedRows...)
+			return nil
+		})
+		if err != nil {
+			return nil, err
+		}
+		return rows, nil
+	}
+
+	rows := make([]metav1.TableRow, 0, 1)
+	m, err := meta.Accessor(obj)
+	if err != nil {
+		return nil, err
+	}
+	row := metav1.TableRow{
+		Object: runtime.RawExtension{Object: obj},
+	}
+	row.Cells = append(row.Cells, m.GetName(), translateTimestampSince(m.GetCreationTimestamp()))
+	rows = append(rows, row)
+	return rows, nil
+}
+
+// translateTimestampSince returns the elapsed time since timestamp in
+// human-readable approximation.
+func translateTimestampSince(timestamp metav1.Time) string {
+	if timestamp.IsZero() {
+		return "<unknown>"
+	}
+
+	return duration.HumanDuration(time.Since(timestamp.Time))
+}
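
A minimal standalone sketch of printing a server-style metav1.Table through NewTablePrinter; the column definitions and rows are hypothetical.

package main

import (
	"os"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/cli-runtime/pkg/printers"
)

func main() {
	table := &metav1.Table{
		ColumnDefinitions: []metav1.TableColumnDefinition{
			{Name: "Name", Type: "string", Format: "name"},
			{Name: "Ready", Type: "string"},
		},
		Rows: []metav1.TableRow{
			{Cells: []interface{}{"web-0", "1/1"}},
			{Cells: []interface{}{"web-1", "0/1"}},
		},
	}

	// Headers are upper-cased and the cells are aligned by the tab writer.
	p := printers.NewTablePrinter(printers.PrintOptions{})
	_ = p.PrintObj(table, os.Stdout)
}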
diff --git a/vendor/k8s.io/cli-runtime/pkg/printers/tabwriter.go b/vendor/k8s.io/cli-runtime/pkg/printers/tabwriter.go
new file mode 100644
index 00000000..21d60e1c
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/printers/tabwriter.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package printers
+
+import (
+	"io"
+
+	"github.com/liggitt/tabwriter"
+)
+
+const (
+	tabwriterMinWidth = 6
+	tabwriterWidth    = 4
+	tabwriterPadding  = 3
+	tabwriterPadChar  = ' '
+	tabwriterFlags    = tabwriter.RememberWidths
+)
+
+// GetNewTabWriter returns a tabwriter that translates tabbed columns in input into properly aligned text.
+func GetNewTabWriter(output io.Writer) *tabwriter.Writer {
+	return tabwriter.NewWriter(output, tabwriterMinWidth, tabwriterWidth, tabwriterPadding, tabwriterPadChar, tabwriterFlags)
+}
diff --git a/vendor/k8s.io/cli-runtime/pkg/printers/template.go b/vendor/k8s.io/cli-runtime/pkg/printers/template.go
new file mode 100644
index 00000000..ccff5422
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/printers/template.go
@@ -0,0 +1,118 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package printers
+
+import (
+	"encoding/base64"
+	"fmt"
+	"io"
+	"reflect"
+	"text/template"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/json"
+)
+
+// GoTemplatePrinter is an implementation of ResourcePrinter which formats data with a Go Template.
+type GoTemplatePrinter struct {
+	rawTemplate string
+	template    *template.Template
+}
+
+func NewGoTemplatePrinter(tmpl []byte) (*GoTemplatePrinter, error) {
+	t, err := template.New("output").
+		Funcs(template.FuncMap{
+			"exists":       exists,
+			"base64decode": base64decode,
+		}).
+		Parse(string(tmpl))
+	if err != nil {
+		return nil, err
+	}
+	return &GoTemplatePrinter{
+		rawTemplate: string(tmpl),
+		template:    t,
+	}, nil
+}
+
+// AllowMissingKeys tells the template engine if missing keys are allowed.
+func (p *GoTemplatePrinter) AllowMissingKeys(allow bool) {
+	if allow {
+		p.template.Option("missingkey=default")
+	} else {
+		p.template.Option("missingkey=error")
+	}
+}
+
+// PrintObj formats the obj with the Go Template.
+func (p *GoTemplatePrinter) PrintObj(obj runtime.Object, w io.Writer) error {
+	if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) {
+		return fmt.Errorf(InternalObjectPrinterErr)
+	}
+
+	var data []byte
+	var err error
+	data, err = json.Marshal(obj)
+	if err != nil {
+		return err
+	}
+
+	out := map[string]interface{}{}
+	if err := json.Unmarshal(data, &out); err != nil {
+		return err
+	}
+	if err = p.safeExecute(w, out); err != nil {
+		// It is far easier to debug template problems when they show up in
+		// the output instead of only in the returned error. So in addition to
+		// returning a nice error, also print useful context with the writer.
+		fmt.Fprintf(w, "Error executing template: %v. Printing more information for debugging the template:\n", err)
+		fmt.Fprintf(w, "\ttemplate was:\n\t\t%v\n", p.rawTemplate)
+		fmt.Fprintf(w, "\traw data was:\n\t\t%v\n", string(data))
+		fmt.Fprintf(w, "\tobject given to template engine was:\n\t\t%+v\n\n", out)
+		return fmt.Errorf("error executing template %q: %v", p.rawTemplate, err)
+	}
+	return nil
+}
+
+// safeExecute tries to execute the template, but catches panics and returns an error
+// should the template engine panic.
+func (p *GoTemplatePrinter) safeExecute(w io.Writer, obj interface{}) error {
+	var panicErr error
+	// Sorry for the double anonymous function. There's probably a clever way
+	// to do this that has the defer'd func setting the value to be returned, but
+	// that would be even less obvious.
+	retErr := func() error {
+		defer func() {
+			if x := recover(); x != nil {
+				panicErr = fmt.Errorf("caught panic: %+v", x)
+			}
+		}()
+		return p.template.Execute(w, obj)
+	}()
+	if panicErr != nil {
+		return panicErr
+	}
+	return retErr
+}
+
+func base64decode(v string) (string, error) {
+	data, err := base64.StdEncoding.DecodeString(v)
+	if err != nil {
+		return "", fmt.Errorf("base64 decode failed: %v", err)
+	}
+	return string(data), nil
+}
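
A minimal standalone sketch of GoTemplatePrinter using the base64decode helper registered above, with hypothetical Secret data:

package main

import (
	"os"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/cli-runtime/pkg/printers"
)

func main() {
	p, err := printers.NewGoTemplatePrinter([]byte(`{{.metadata.name}}: {{base64decode .data.token}}{{"\n"}}`))
	if err != nil {
		panic(err)
	}
	p.AllowMissingKeys(false)

	u := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "Secret",
		"metadata":   map[string]interface{}{"name": "creds"},
		"data":       map[string]interface{}{"token": "c2VjcmV0"}, // base64 of "secret"
	}}

	// Prints "creds: secret".
	_ = p.PrintObj(u, os.Stdout)
}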
diff --git a/vendor/k8s.io/cli-runtime/pkg/printers/typesetter.go b/vendor/k8s.io/cli-runtime/pkg/printers/typesetter.go
new file mode 100644
index 00000000..8d2d9b56
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/printers/typesetter.go
@@ -0,0 +1,95 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package printers
+
+import (
+	"fmt"
+	"io"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// TypeSetterPrinter is an implementation of ResourcePrinter that wraps another printer and sets type information on objects before printing.
+type TypeSetterPrinter struct {
+	Delegate ResourcePrinter
+
+	Typer runtime.ObjectTyper
+}
+
+// NewTypeSetter constructs a wrapping printer with required params
+func NewTypeSetter(typer runtime.ObjectTyper) *TypeSetterPrinter {
+	return &TypeSetterPrinter{Typer: typer}
+}
+
+// PrintObj is an implementation of ResourcePrinter.PrintObj which sets type information on the obj for the duration
+// of printing.  It is NOT threadsafe.
+func (p *TypeSetterPrinter) PrintObj(obj runtime.Object, w io.Writer) error {
+	if obj == nil {
+		return p.Delegate.PrintObj(obj, w)
+	}
+	if !obj.GetObjectKind().GroupVersionKind().Empty() {
+		return p.Delegate.PrintObj(obj, w)
+	}
+
+	// we were empty coming in, make sure we're empty going out.  This makes the call thread-unsafe
+	defer func() {
+		obj.GetObjectKind().SetGroupVersionKind(schema.GroupVersionKind{})
+	}()
+
+	gvks, _, err := p.Typer.ObjectKinds(obj)
+	if err != nil {
+		// printers wrapped by us expect to find the type information present
+		return fmt.Errorf("missing apiVersion or kind and cannot assign it; %v", err)
+	}
+
+	for _, gvk := range gvks {
+		if len(gvk.Kind) == 0 {
+			continue
+		}
+		if len(gvk.Version) == 0 || gvk.Version == runtime.APIVersionInternal {
+			continue
+		}
+		obj.GetObjectKind().SetGroupVersionKind(gvk)
+		break
+	}
+
+	return p.Delegate.PrintObj(obj, w)
+}
+
+// ToPrinter returns a printer (not threadsafe!) that has been wrapped
+func (p *TypeSetterPrinter) ToPrinter(delegate ResourcePrinter) ResourcePrinter {
+	if p == nil {
+		return delegate
+	}
+
+	p.Delegate = delegate
+	return p
+}
+
+// WrapToPrinter wraps the common ToPrinter method
+func (p *TypeSetterPrinter) WrapToPrinter(delegate ResourcePrinter, err error) (ResourcePrinter, error) {
+	if err != nil {
+		return delegate, err
+	}
+	if p == nil {
+		return delegate, nil
+	}
+
+	p.Delegate = delegate
+	return p, nil
+}
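
A minimal standalone sketch of wrapping a YAML printer with a type setter, so a typed object's empty TypeMeta is filled in from the client-go scheme before printing; the ConfigMap is hypothetical.

package main

import (
	"os"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/cli-runtime/pkg/printers"
	"k8s.io/client-go/kubernetes/scheme"
)

func main() {
	// Typed objects built in Go normally carry an empty apiVersion/kind.
	cm := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: "example"},
		Data:       map[string]string{"key": "value"},
	}

	// The type setter looks the GVK up in the scheme, sets it for the duration
	// of the call, and then delegates to the wrapped YAML printer.
	p := printers.NewTypeSetter(scheme.Scheme).ToPrinter(&printers.YAMLPrinter{})
	_ = p.PrintObj(cm, os.Stdout)
}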
diff --git a/vendor/k8s.io/cli-runtime/pkg/resource/builder.go b/vendor/k8s.io/cli-runtime/pkg/resource/builder.go
new file mode 100644
index 00000000..88314404
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/resource/builder.go
@@ -0,0 +1,1193 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"net/url"
+	"os"
+	"strings"
+	"sync"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructuredscheme"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/client-go/restmapper"
+)
+
+var FileExtensions = []string{".json", ".yaml", ".yml"}
+var InputExtensions = append(FileExtensions, "stdin")
+
+const defaultHttpGetAttempts int = 3
+
+// Builder provides convenience functions for taking arguments and parameters
+// from the command line and converting them to a list of resources to iterate
+// over using the Visitor interface.
+type Builder struct {
+	categoryExpanderFn CategoryExpanderFunc
+
+	// mapper is set explicitly by resource builders
+	mapper *mapper
+
+	// clientConfigFn is a function to produce a client, *if* you need one
+	clientConfigFn ClientConfigFunc
+
+	restMapperFn RESTMapperFunc
+
+	// objectTyper is statically determined per-command invocation based on your internal or unstructured choice;
+	// it does not ever need to rely upon discovery.
+	objectTyper runtime.ObjectTyper
+
+	// negotiatedSerializer describes which codecs you want to use
+	negotiatedSerializer runtime.NegotiatedSerializer
+
+	// local indicates that we cannot make server calls
+	local bool
+
+	errs []error
+
+	paths  []Visitor
+	stream bool
+	dir    bool
+
+	labelSelector     *string
+	fieldSelector     *string
+	selectAll         bool
+	limitChunks       int64
+	requestTransforms []RequestTransform
+
+	resources []string
+
+	namespace    string
+	allNamespace bool
+	names        []string
+
+	resourceTuples []resourceTuple
+
+	defaultNamespace bool
+	requireNamespace bool
+
+	flatten bool
+	latest  bool
+
+	requireObject bool
+
+	singleResourceType bool
+	continueOnError    bool
+
+	singleItemImplied bool
+
+	export bool
+
+	schema ContentValidator
+
+	// fakeClientFn is used for testing
+	fakeClientFn FakeClientFunc
+}
+
+var missingResourceError = fmt.Errorf(`You must provide one or more resources by argument or filename.
+Example resource specifications include:
+   '-f rsrc.yaml'
+   '--filename=rsrc.json'
+   '<resource> <name>'
+   '<resource>'`)
+
+var LocalResourceError = errors.New(`error: you must specify resources by --filename when --local is set.
+Example resource specifications include:
+   '-f rsrc.yaml'
+   '--filename=rsrc.json'`)
+
+// TODO: expand this to include other errors.
+func IsUsageError(err error) bool {
+	if err == nil {
+		return false
+	}
+	return err == missingResourceError
+}
+
+type FilenameOptions struct {
+	Filenames []string
+	Kustomize string
+	Recursive bool
+}
+
+func (o *FilenameOptions) validate() []error {
+	var errs []error
+	if len(o.Filenames) > 0 && len(o.Kustomize) > 0 {
+		errs = append(errs, fmt.Errorf("only one of -f or -k can be specified"))
+	}
+	if len(o.Kustomize) > 0 && o.Recursive {
+		errs = append(errs, fmt.Errorf("the -k flag can't be used with -f or -R"))
+	}
+	return errs
+}
+
+func (o *FilenameOptions) RequireFilenameOrKustomize() error {
+	if len(o.Filenames) == 0 && len(o.Kustomize) == 0 {
+		return fmt.Errorf("must specify one of -f and -k")
+	}
+	return nil
+}
+
+type resourceTuple struct {
+	Resource string
+	Name     string
+}
+
+type FakeClientFunc func(version schema.GroupVersion) (RESTClient, error)
+
+func NewFakeBuilder(fakeClientFn FakeClientFunc, restMapper RESTMapperFunc, categoryExpander CategoryExpanderFunc) *Builder {
+	ret := newBuilder(nil, restMapper, categoryExpander)
+	ret.fakeClientFn = fakeClientFn
+	return ret
+}
+
+// NewBuilder creates a builder that operates on generic objects. At least one of
+// internal or unstructured must be specified.
+// TODO: Add versioned client (although versioned is still lossy)
+// TODO remove internal and unstructured mapper and instead have them set the negotiated serializer for use in the client
+func newBuilder(clientConfigFn ClientConfigFunc, restMapper RESTMapperFunc, categoryExpander CategoryExpanderFunc) *Builder {
+	return &Builder{
+		clientConfigFn:     clientConfigFn,
+		restMapperFn:       restMapper,
+		categoryExpanderFn: categoryExpander,
+		requireObject:      true,
+	}
+}
+
+func NewBuilder(restClientGetter RESTClientGetter) *Builder {
+	categoryExpanderFn := func() (restmapper.CategoryExpander, error) {
+		discoveryClient, err := restClientGetter.ToDiscoveryClient()
+		if err != nil {
+			return nil, err
+		}
+		return restmapper.NewDiscoveryCategoryExpander(discoveryClient), err
+	}
+
+	return newBuilder(
+		restClientGetter.ToRESTConfig,
+		(&cachingRESTMapperFunc{delegate: restClientGetter.ToRESTMapper}).ToRESTMapper,
+		(&cachingCategoryExpanderFunc{delegate: categoryExpanderFn}).ToCategoryExpander,
+	)
+}
+
+func (b *Builder) Schema(schema ContentValidator) *Builder {
+	b.schema = schema
+	return b
+}
+
+func (b *Builder) AddError(err error) *Builder {
+	if err == nil {
+		return b
+	}
+	b.errs = append(b.errs, err)
+	return b
+}
+
+// FilenameParam groups input into two categories: URLs and files (files, directories, STDIN)
+// If enforceNamespace is false, namespaces in the specs will be allowed to
+// override the default namespace. If it is true, namespaces that don't match
+// will cause an error.
+// If ContinueOnError() is set prior to this method, objects on the path that are not
+// recognized will be ignored (but logged at V(2)).
+func (b *Builder) FilenameParam(enforceNamespace bool, filenameOptions *FilenameOptions) *Builder {
+	if errs := filenameOptions.validate(); len(errs) > 0 {
+		b.errs = append(b.errs, errs...)
+		return b
+	}
+	recursive := filenameOptions.Recursive
+	paths := filenameOptions.Filenames
+	for _, s := range paths {
+		switch {
+		case s == "-":
+			b.Stdin()
+		case strings.Index(s, "http://") == 0 || strings.Index(s, "https://") == 0:
+			url, err := url.Parse(s)
+			if err != nil {
+				b.errs = append(b.errs, fmt.Errorf("the URL passed to filename %q is not valid: %v", s, err))
+				continue
+			}
+			b.URL(defaultHttpGetAttempts, url)
+		default:
+			if !recursive {
+				b.singleItemImplied = true
+			}
+			b.Path(recursive, s)
+		}
+	}
+	if filenameOptions.Kustomize != "" {
+		b.paths = append(b.paths, &KustomizeVisitor{filenameOptions.Kustomize,
+			NewStreamVisitor(nil, b.mapper, filenameOptions.Kustomize, b.schema)})
+	}
+
+	if enforceNamespace {
+		b.RequireNamespace()
+	}
+
+	return b
+}
+
+// Unstructured updates the builder so that it will request and send unstructured
+// objects. Unstructured objects preserve all fields sent by the server in a map format
+// based on the object's JSON structure which means no data is lost when the client
+// reads and then writes an object. Use this mode in preference to Internal unless you
+// are working with Go types directly.
+func (b *Builder) Unstructured() *Builder {
+	if b.mapper != nil {
+		b.errs = append(b.errs, fmt.Errorf("another mapper was already selected, cannot use unstructured types"))
+		return b
+	}
+	b.objectTyper = unstructuredscheme.NewUnstructuredObjectTyper()
+	b.mapper = &mapper{
+		localFn:      b.isLocal,
+		restMapperFn: b.restMapperFn,
+		clientFn:     b.getClient,
+		decoder:      &metadataValidatingDecoder{unstructured.UnstructuredJSONScheme},
+	}
+
+	return b
+}
+
+// WithScheme uses the scheme to manage typing, conversion (optional), and decoding.  If decodingVersions
+// is empty, then you can end up with internal types.  You have been warned.
+func (b *Builder) WithScheme(scheme *runtime.Scheme, decodingVersions ...schema.GroupVersion) *Builder {
+	if b.mapper != nil {
+		b.errs = append(b.errs, fmt.Errorf("another mapper was already selected, cannot use internal types"))
+		return b
+	}
+	b.objectTyper = scheme
+	codecFactory := serializer.NewCodecFactory(scheme)
+	negotiatedSerializer := runtime.NegotiatedSerializer(codecFactory)
+	// if you specified versions, you're specifying a desire for external types, which you don't want to round-trip through
+	// internal types
+	if len(decodingVersions) > 0 {
+		negotiatedSerializer = codecFactory.WithoutConversion()
+	}
+	b.negotiatedSerializer = negotiatedSerializer
+
+	b.mapper = &mapper{
+		localFn:      b.isLocal,
+		restMapperFn: b.restMapperFn,
+		clientFn:     b.getClient,
+		decoder:      codecFactory.UniversalDecoder(decodingVersions...),
+	}
+
+	return b
+}
+
+// LocalParam calls Local() if local is true.
+func (b *Builder) LocalParam(local bool) *Builder {
+	if local {
+		b.Local()
+	}
+	return b
+}
+
+// Local will avoid asking the server for results.
+func (b *Builder) Local() *Builder {
+	b.local = true
+	return b
+}
+
+func (b *Builder) isLocal() bool {
+	return b.local
+}
+
+// Mapper returns a copy of the current mapper.
+func (b *Builder) Mapper() *mapper {
+	mapper := *b.mapper
+	return &mapper
+}
+
+// URL accepts a number of URLs directly.
+func (b *Builder) URL(httpAttemptCount int, urls ...*url.URL) *Builder {
+	for _, u := range urls {
+		b.paths = append(b.paths, &URLVisitor{
+			URL:              u,
+			StreamVisitor:    NewStreamVisitor(nil, b.mapper, u.String(), b.schema),
+			HttpAttemptCount: httpAttemptCount,
+		})
+	}
+	return b
+}
+
+// Stdin will read objects from the standard input. If ContinueOnError() is set
+// prior to this method being called, objects in the stream that are unrecognized
+// will be ignored (but logged at V(2)).
+func (b *Builder) Stdin() *Builder {
+	b.stream = true
+	b.paths = append(b.paths, FileVisitorForSTDIN(b.mapper, b.schema))
+	return b
+}
+
+// Stream will read objects from the provided reader, and if an error occurs will
+// include the name string in the error message. If ContinueOnError() is set
+// prior to this method being called, objects in the stream that are unrecognized
+// will be ignored (but logged at V(2)).
+func (b *Builder) Stream(r io.Reader, name string) *Builder {
+	b.stream = true
+	b.paths = append(b.paths, NewStreamVisitor(r, b.mapper, name, b.schema))
+	return b
+}
+
+// Path accepts a set of paths that may be files or directories (each of which may contain
+// one or more resources). A FileVisitor is created for each file, and each FileVisitor
+// streams its content to a StreamVisitor. If ContinueOnError() is set
+// prior to this method being called, objects on the path that are unrecognized will be
+// ignored (but logged at V(2)).
+func (b *Builder) Path(recursive bool, paths ...string) *Builder {
+	for _, p := range paths {
+		_, err := os.Stat(p)
+		if os.IsNotExist(err) {
+			b.errs = append(b.errs, fmt.Errorf("the path %q does not exist", p))
+			continue
+		}
+		if err != nil {
+			b.errs = append(b.errs, fmt.Errorf("the path %q cannot be accessed: %v", p, err))
+			continue
+		}
+
+		visitors, err := ExpandPathsToFileVisitors(b.mapper, p, recursive, FileExtensions, b.schema)
+		if err != nil {
+			b.errs = append(b.errs, fmt.Errorf("error reading %q: %v", p, err))
+		}
+		if len(visitors) > 1 {
+			b.dir = true
+		}
+
+		b.paths = append(b.paths, visitors...)
+	}
+	if len(b.paths) == 0 && len(b.errs) == 0 {
+		b.errs = append(b.errs, fmt.Errorf("error reading %v: recognized file extensions are %v", paths, FileExtensions))
+	}
+	return b
+}
+
+// ResourceTypes is a list of types of resources to operate on, when listing objects on
+// the server or retrieving objects that match a selector.
+func (b *Builder) ResourceTypes(types ...string) *Builder {
+	b.resources = append(b.resources, types...)
+	return b
+}
+
+// ResourceNames accepts a default type and one or more names, and creates tuples of
+// resources
+func (b *Builder) ResourceNames(resource string, names ...string) *Builder {
+	for _, name := range names {
+		// See if this input string is of type/name format
+		tuple, ok, err := splitResourceTypeName(name)
+		if err != nil {
+			b.errs = append(b.errs, err)
+			return b
+		}
+
+		if ok {
+			b.resourceTuples = append(b.resourceTuples, tuple)
+			continue
+		}
+		if len(resource) == 0 {
+			b.errs = append(b.errs, fmt.Errorf("the argument %q must be RESOURCE/NAME", name))
+			continue
+		}
+
+		// Use the given default type to create a resource tuple
+		b.resourceTuples = append(b.resourceTuples, resourceTuple{Resource: resource, Name: name})
+	}
+	return b
+}
+
+// LabelSelectorParam defines a selector that should be applied to the object types to load.
+// This will not affect files loaded from disk or URL. If the parameter is empty it is
+// a no-op - to select all resources invoke `b.LabelSelector(labels.Everything().String())`.
+func (b *Builder) LabelSelectorParam(s string) *Builder {
+	selector := strings.TrimSpace(s)
+	if len(selector) == 0 {
+		return b
+	}
+	if b.selectAll {
+		b.errs = append(b.errs, fmt.Errorf("found non-empty label selector %q with previously set 'all' parameter. ", s))
+		return b
+	}
+	return b.LabelSelector(selector)
+}
+
+// LabelSelector accepts a selector directly and will filter the resulting list by that selector.
+// Use LabelSelectorParam instead for user input.
+func (b *Builder) LabelSelector(selector string) *Builder {
+	if len(selector) == 0 {
+		return b
+	}
+
+	b.labelSelector = &selector
+	return b
+}
+
+// FieldSelectorParam defines a selector that should be applied to the object types to load.
+// This will not affect files loaded from disk or URL. If the parameter is empty it is
+// a no-op (no field filtering is applied).
+func (b *Builder) FieldSelectorParam(s string) *Builder {
+	s = strings.TrimSpace(s)
+	if len(s) == 0 {
+		return b
+	}
+	if b.selectAll {
+		b.errs = append(b.errs, fmt.Errorf("found non-empty field selector %q with previously set 'all' parameter. ", s))
+		return b
+	}
+	b.fieldSelector = &s
+	return b
+}
+
+// ExportParam accepts the export boolean for these resources
+func (b *Builder) ExportParam(export bool) *Builder {
+	b.export = export
+	return b
+}
+
+// NamespaceParam accepts the namespace that these resources should be
+// considered under - used by DefaultNamespace() and RequireNamespace()
+func (b *Builder) NamespaceParam(namespace string) *Builder {
+	b.namespace = namespace
+	return b
+}
+
+// DefaultNamespace instructs the builder to set the namespace value for any object found
+// to NamespaceParam() if empty.
+func (b *Builder) DefaultNamespace() *Builder {
+	b.defaultNamespace = true
+	return b
+}
+
+// AllNamespaces instructs the builder to use metav1.NamespaceAll as the namespace so that
+// resources are requested across all namespaces. This overrides the namespace set by NamespaceParam().
+func (b *Builder) AllNamespaces(allNamespace bool) *Builder {
+	if allNamespace {
+		b.namespace = metav1.NamespaceAll
+	}
+	b.allNamespace = allNamespace
+	return b
+}
+
+// RequireNamespace instructs the builder to set the namespace value for any object found
+// to NamespaceParam() if empty, and if the value on the resource does not match
+// NamespaceParam() an error will be returned.
+func (b *Builder) RequireNamespace() *Builder {
+	b.requireNamespace = true
+	return b
+}
+
+// RequestChunksOf attempts to load responses from the server in batches of size limit
+// to avoid long delays loading and transferring very large lists. If unset, it defaults to
+// no chunking.
+func (b *Builder) RequestChunksOf(chunkSize int64) *Builder {
+	b.limitChunks = chunkSize
+	return b
+}
+
+// TransformRequests alters API calls made by clients requested from this builder. Pass
+// an empty list to clear modifiers.
+func (b *Builder) TransformRequests(opts ...RequestTransform) *Builder {
+	b.requestTransforms = opts
+	return b
+}
+
+// SelectAllParam instructs the builder to select all resources; it is an error to combine this with a previously set label or field selector.
+func (b *Builder) SelectAllParam(selectAll bool) *Builder {
+	if selectAll && (b.labelSelector != nil || b.fieldSelector != nil) {
+		b.errs = append(b.errs, fmt.Errorf("setting 'all' parameter but found a non empty selector. "))
+		return b
+	}
+	b.selectAll = selectAll
+	return b
+}
+
+// ResourceTypeOrNameArgs indicates that the builder should accept arguments
+// of the form `(<type1>[,<type2>,...]|<type> <name1>[,<name2>,...])`. When one argument is
+// received, the types provided will be retrieved from the server (and may be comma delimited).
+// When two or more arguments are received, they must be a single type and resource name(s).
+// The allowEmptySelector parameter permits selecting all resources (via the labels.Everything selector).
+func (b *Builder) ResourceTypeOrNameArgs(allowEmptySelector bool, args ...string) *Builder {
+	args = normalizeMultipleResourcesArgs(args)
+	if ok, err := hasCombinedTypeArgs(args); ok {
+		if err != nil {
+			b.errs = append(b.errs, err)
+			return b
+		}
+		for _, s := range args {
+			tuple, ok, err := splitResourceTypeName(s)
+			if err != nil {
+				b.errs = append(b.errs, err)
+				return b
+			}
+			if ok {
+				b.resourceTuples = append(b.resourceTuples, tuple)
+			}
+		}
+		return b
+	}
+	if len(args) > 0 {
+		// Try replacing aliases only in types
+		args[0] = b.ReplaceAliases(args[0])
+	}
+	switch {
+	case len(args) > 2:
+		b.names = append(b.names, args[1:]...)
+		b.ResourceTypes(SplitResourceArgument(args[0])...)
+	case len(args) == 2:
+		b.names = append(b.names, args[1])
+		b.ResourceTypes(SplitResourceArgument(args[0])...)
+	case len(args) == 1:
+		b.ResourceTypes(SplitResourceArgument(args[0])...)
+		if b.labelSelector == nil && allowEmptySelector {
+			selector := labels.Everything().String()
+			b.labelSelector = &selector
+		}
+	case len(args) == 0:
+	default:
+		b.errs = append(b.errs, fmt.Errorf("arguments must consist of a resource or a resource and name"))
+	}
+	return b
+}
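+
+// Illustrative sketch (not part of the upstream file): the two argument forms the method above
+// accepts, assuming b is a *Builder prepared by NewBuilder.
+//
+//	b.ResourceTypeOrNameArgs(true, "pods,services")         // comma-delimited types, listed via a selector
+//	b.ResourceTypeOrNameArgs(true, "pod", "web-0", "web-1") // a single type followed by resource names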
+
+// ReplaceAliases accepts an argument and tries to expand any existing
+// aliases found in it
+func (b *Builder) ReplaceAliases(input string) string {
+	replaced := []string{}
+	for _, arg := range strings.Split(input, ",") {
+		if b.categoryExpanderFn == nil {
+			continue
+		}
+		categoryExpander, err := b.categoryExpanderFn()
+		if err != nil {
+			b.AddError(err)
+			continue
+		}
+
+		if resources, ok := categoryExpander.Expand(arg); ok {
+			asStrings := []string{}
+			for _, resource := range resources {
+				if len(resource.Group) == 0 {
+					asStrings = append(asStrings, resource.Resource)
+					continue
+				}
+				asStrings = append(asStrings, resource.Resource+"."+resource.Group)
+			}
+			arg = strings.Join(asStrings, ",")
+		}
+		replaced = append(replaced, arg)
+	}
+	return strings.Join(replaced, ",")
+}
+
+func hasCombinedTypeArgs(args []string) (bool, error) {
+	hasSlash := 0
+	for _, s := range args {
+		if strings.Contains(s, "/") {
+			hasSlash++
+		}
+	}
+	switch {
+	case hasSlash > 0 && hasSlash == len(args):
+		return true, nil
+	case hasSlash > 0 && hasSlash != len(args):
+		baseCmd := "cmd"
+		if len(os.Args) > 0 {
+			baseCmdSlice := strings.Split(os.Args[0], "/")
+			baseCmd = baseCmdSlice[len(baseCmdSlice)-1]
+		}
+		return true, fmt.Errorf("there is no need to specify a resource type as a separate argument when passing arguments in resource/name form (e.g. '%s get resource/<resource_name>' instead of '%s get resource resource/<resource_name>')", baseCmd, baseCmd)
+	default:
+		return false, nil
+	}
+}
+
+// normalizeMultipleResourcesArgs converts multiple resource arguments into resource tuples;
+// for example, `a,b,c d` is transformed into `a/d b/d c/d`.
+func normalizeMultipleResourcesArgs(args []string) []string {
+	if len(args) >= 2 {
+		resources := []string{}
+		resources = append(resources, SplitResourceArgument(args[0])...)
+		if len(resources) > 1 {
+			names := []string{}
+			names = append(names, args[1:]...)
+			newArgs := []string{}
+			for _, resource := range resources {
+				for _, name := range names {
+					newArgs = append(newArgs, strings.Join([]string{resource, name}, "/"))
+				}
+			}
+			return newArgs
+		}
+	}
+	return args
+}
+
+// splitResourceTypeName handles type/name resource formats and returns a resource tuple
+// (empty or not), whether it successfully found one, and an error
+func splitResourceTypeName(s string) (resourceTuple, bool, error) {
+	if !strings.Contains(s, "/") {
+		return resourceTuple{}, false, nil
+	}
+	seg := strings.Split(s, "/")
+	if len(seg) != 2 {
+		return resourceTuple{}, false, fmt.Errorf("arguments in resource/name form may not have more than one slash")
+	}
+	resource, name := seg[0], seg[1]
+	if len(resource) == 0 || len(name) == 0 || len(SplitResourceArgument(resource)) != 1 {
+		return resourceTuple{}, false, fmt.Errorf("arguments in resource/name form must have a single resource and name")
+	}
+	return resourceTuple{Resource: resource, Name: name}, true, nil
+}
+
+// Flatten will convert any objects with a field named "Items" that is an array of runtime.Object
+// compatible types into individual entries that are each visited on their own. The original object
+// is not passed to any visitors.
+func (b *Builder) Flatten() *Builder {
+	b.flatten = true
+	return b
+}
+
+// Latest will fetch the latest copy of any objects loaded from URLs or files from the server.
+func (b *Builder) Latest() *Builder {
+	b.latest = true
+	return b
+}
+
+// RequireObject ensures that resulting infos have an object set. If false, resulting info may not have an object set.
+func (b *Builder) RequireObject(require bool) *Builder {
+	b.requireObject = require
+	return b
+}
+
+// ContinueOnError will attempt to load and visit as many objects as possible, even if some visits
+// return errors or some objects cannot be loaded. The default behavior is to terminate after
+// the first error is returned from a VisitorFunc.
+func (b *Builder) ContinueOnError() *Builder {
+	b.continueOnError = true
+	return b
+}
+
+// SingleResourceType will cause the builder to error if the user specifies more than a single type
+// of resource.
+func (b *Builder) SingleResourceType() *Builder {
+	b.singleResourceType = true
+	return b
+}
+
+// mappingFor returns the RESTMapping for the Kind given, or the Kind referenced by the resource.
+// Prefers a fully specified GroupVersionResource match. If one is not found, we match on a fully
+// specified GroupVersionKind, or fall back to a match on GroupKind.
+func (b *Builder) mappingFor(resourceOrKindArg string) (*meta.RESTMapping, error) {
+	fullySpecifiedGVR, groupResource := schema.ParseResourceArg(resourceOrKindArg)
+	gvk := schema.GroupVersionKind{}
+	restMapper, err := b.restMapperFn()
+	if err != nil {
+		return nil, err
+	}
+
+	if fullySpecifiedGVR != nil {
+		gvk, _ = restMapper.KindFor(*fullySpecifiedGVR)
+	}
+	if gvk.Empty() {
+		gvk, _ = restMapper.KindFor(groupResource.WithVersion(""))
+	}
+	if !gvk.Empty() {
+		return restMapper.RESTMapping(gvk.GroupKind(), gvk.Version)
+	}
+
+	fullySpecifiedGVK, groupKind := schema.ParseKindArg(resourceOrKindArg)
+	if fullySpecifiedGVK == nil {
+		gvk := groupKind.WithVersion("")
+		fullySpecifiedGVK = &gvk
+	}
+
+	if !fullySpecifiedGVK.Empty() {
+		if mapping, err := restMapper.RESTMapping(fullySpecifiedGVK.GroupKind(), fullySpecifiedGVK.Version); err == nil {
+			return mapping, nil
+		}
+	}
+
+	mapping, err := restMapper.RESTMapping(groupKind, gvk.Version)
+	if err != nil {
+		// if we error out here, it is because we could not match a resource or a kind
+		// for the given argument. To maintain consistency with previous behavior,
+		// announce that a resource type could not be found.
+		// if the error is _not_ a *meta.NoKindMatchError, then we had trouble doing discovery,
+		// so we should return the original error since it may help a user diagnose what is actually wrong
+		if meta.IsNoMatchError(err) {
+			return nil, fmt.Errorf("the server doesn't have a resource type %q", groupResource.Resource)
+		}
+		return nil, err
+	}
+
+	return mapping, nil
+}
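+
+// Illustrative note (not part of the upstream file): mappingFor resolves several spellings of the
+// same thing, depending on what the RESTMapper reports; assuming b is a configured *Builder, both
+// calls below can yield the apps deployments mapping.
+//
+//	mapping, err := b.mappingFor("deployments.v1.apps") // fully specified group-version-resource
+//	mapping, err = b.mappingFor("deployment")           // bare resource (or kind), matched via the RESTMapper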
+
+func (b *Builder) resourceMappings() ([]*meta.RESTMapping, error) {
+	if len(b.resources) > 1 && b.singleResourceType {
+		return nil, fmt.Errorf("you may only specify a single resource type")
+	}
+	mappings := []*meta.RESTMapping{}
+	seen := map[schema.GroupVersionKind]bool{}
+	for _, r := range b.resources {
+		mapping, err := b.mappingFor(r)
+		if err != nil {
+			return nil, err
+		}
+		// This ensures that mappings for resources (shortcuts, plurals) are unique
+		if seen[mapping.GroupVersionKind] {
+			continue
+		}
+		seen[mapping.GroupVersionKind] = true
+
+		mappings = append(mappings, mapping)
+	}
+	return mappings, nil
+}
+
+func (b *Builder) resourceTupleMappings() (map[string]*meta.RESTMapping, error) {
+	mappings := make(map[string]*meta.RESTMapping)
+	canonical := make(map[schema.GroupVersionResource]struct{})
+	for _, r := range b.resourceTuples {
+		if _, ok := mappings[r.Resource]; ok {
+			continue
+		}
+		mapping, err := b.mappingFor(r.Resource)
+		if err != nil {
+			return nil, err
+		}
+
+		mappings[r.Resource] = mapping
+		canonical[mapping.Resource] = struct{}{}
+	}
+	if len(canonical) > 1 && b.singleResourceType {
+		return nil, fmt.Errorf("you may only specify a single resource type")
+	}
+	return mappings, nil
+}
+
+func (b *Builder) visitorResult() *Result {
+	if len(b.errs) > 0 {
+		return &Result{err: utilerrors.NewAggregate(b.errs)}
+	}
+
+	if b.selectAll {
+		selector := labels.Everything().String()
+		b.labelSelector = &selector
+	}
+
+	// visit items specified by paths
+	if len(b.paths) != 0 {
+		return b.visitByPaths()
+	}
+
+	// visit selectors
+	if b.labelSelector != nil || b.fieldSelector != nil {
+		return b.visitBySelector()
+	}
+
+	// visit items specified by resource and name
+	if len(b.resourceTuples) != 0 {
+		return b.visitByResource()
+	}
+
+	// visit items specified by name
+	if len(b.names) != 0 {
+		return b.visitByName()
+	}
+
+	if len(b.resources) != 0 {
+		for _, r := range b.resources {
+			_, err := b.mappingFor(r)
+			if err != nil {
+				return &Result{err: err}
+			}
+		}
+		return &Result{err: fmt.Errorf("resource(s) were provided, but no name, label selector, or --all flag specified")}
+	}
+	return &Result{err: missingResourceError}
+}
+
+func (b *Builder) visitBySelector() *Result {
+	result := &Result{
+		targetsSingleItems: false,
+	}
+
+	if len(b.names) != 0 {
+		return result.withError(fmt.Errorf("name cannot be provided when a selector is specified"))
+	}
+	if len(b.resourceTuples) != 0 {
+		return result.withError(fmt.Errorf("selectors and the all flag cannot be used when passing resource/name arguments"))
+	}
+	if len(b.resources) == 0 {
+		return result.withError(fmt.Errorf("at least one resource must be specified to use a selector"))
+	}
+	mappings, err := b.resourceMappings()
+	if err != nil {
+		result.err = err
+		return result
+	}
+
+	var labelSelector, fieldSelector string
+	if b.labelSelector != nil {
+		labelSelector = *b.labelSelector
+	}
+	if b.fieldSelector != nil {
+		fieldSelector = *b.fieldSelector
+	}
+
+	visitors := []Visitor{}
+	for _, mapping := range mappings {
+		client, err := b.getClient(mapping.GroupVersionKind.GroupVersion())
+		if err != nil {
+			result.err = err
+			return result
+		}
+		selectorNamespace := b.namespace
+		if mapping.Scope.Name() != meta.RESTScopeNameNamespace {
+			selectorNamespace = ""
+		}
+		visitors = append(visitors, NewSelector(client, mapping, selectorNamespace, labelSelector, fieldSelector, b.export, b.limitChunks))
+	}
+	if b.continueOnError {
+		result.visitor = EagerVisitorList(visitors)
+	} else {
+		result.visitor = VisitorList(visitors)
+	}
+	result.sources = visitors
+	return result
+}
+
+func (b *Builder) getClient(gv schema.GroupVersion) (RESTClient, error) {
+	var (
+		client RESTClient
+		err    error
+	)
+
+	switch {
+	case b.fakeClientFn != nil:
+		client, err = b.fakeClientFn(gv)
+	case b.negotiatedSerializer != nil:
+		client, err = b.clientConfigFn.clientForGroupVersion(gv, b.negotiatedSerializer)
+	default:
+		client, err = b.clientConfigFn.unstructuredClientForGroupVersion(gv)
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	return NewClientWithOptions(client, b.requestTransforms...), nil
+}
+
+func (b *Builder) visitByResource() *Result {
+	// if b.singleItemImplied is false, that may simply be the zero value, so double-check the length
+	// of resourceTuples to determine whether a single item is in fact implied
+	isSingleItemImplied := b.singleItemImplied
+	if !isSingleItemImplied {
+		isSingleItemImplied = len(b.resourceTuples) == 1
+	}
+
+	result := &Result{
+		singleItemImplied:  isSingleItemImplied,
+		targetsSingleItems: true,
+	}
+
+	if len(b.resources) != 0 {
+		return result.withError(fmt.Errorf("you may not specify individual resources and bulk resources in the same call"))
+	}
+
+	// retrieve one client for each resource
+	mappings, err := b.resourceTupleMappings()
+	if err != nil {
+		result.err = err
+		return result
+	}
+	clients := make(map[string]RESTClient)
+	for _, mapping := range mappings {
+		s := fmt.Sprintf("%s/%s", mapping.GroupVersionKind.GroupVersion().String(), mapping.Resource.Resource)
+		if _, ok := clients[s]; ok {
+			continue
+		}
+		client, err := b.getClient(mapping.GroupVersionKind.GroupVersion())
+		if err != nil {
+			result.err = err
+			return result
+		}
+		clients[s] = client
+	}
+
+	items := []Visitor{}
+	for _, tuple := range b.resourceTuples {
+		mapping, ok := mappings[tuple.Resource]
+		if !ok {
+			return result.withError(fmt.Errorf("resource %q is not recognized: %v", tuple.Resource, mappings))
+		}
+		s := fmt.Sprintf("%s/%s", mapping.GroupVersionKind.GroupVersion().String(), mapping.Resource.Resource)
+		client, ok := clients[s]
+		if !ok {
+			return result.withError(fmt.Errorf("could not find a client for resource %q", tuple.Resource))
+		}
+
+		selectorNamespace := b.namespace
+		if mapping.Scope.Name() != meta.RESTScopeNameNamespace {
+			selectorNamespace = ""
+		} else {
+			if len(b.namespace) == 0 {
+				errMsg := "namespace may not be empty when retrieving a resource by name"
+				if b.allNamespace {
+					errMsg = "a resource cannot be retrieved by name across all namespaces"
+				}
+				return result.withError(fmt.Errorf(errMsg))
+			}
+		}
+
+		info := &Info{
+			Client:    client,
+			Mapping:   mapping,
+			Namespace: selectorNamespace,
+			Name:      tuple.Name,
+			Export:    b.export,
+		}
+		items = append(items, info)
+	}
+
+	var visitors Visitor
+	if b.continueOnError {
+		visitors = EagerVisitorList(items)
+	} else {
+		visitors = VisitorList(items)
+	}
+	result.visitor = visitors
+	result.sources = items
+	return result
+}
+
+func (b *Builder) visitByName() *Result {
+	result := &Result{
+		singleItemImplied:  len(b.names) == 1,
+		targetsSingleItems: true,
+	}
+
+	if len(b.paths) != 0 {
+		return result.withError(fmt.Errorf("when paths, URLs, or stdin is provided as input, you may not specify a resource by arguments as well"))
+	}
+	if len(b.resources) == 0 {
+		return result.withError(fmt.Errorf("you must provide a resource and a resource name together"))
+	}
+	if len(b.resources) > 1 {
+		return result.withError(fmt.Errorf("you must specify only one resource"))
+	}
+
+	mappings, err := b.resourceMappings()
+	if err != nil {
+		result.err = err
+		return result
+	}
+	mapping := mappings[0]
+
+	client, err := b.getClient(mapping.GroupVersionKind.GroupVersion())
+	if err != nil {
+		result.err = err
+		return result
+	}
+
+	selectorNamespace := b.namespace
+	if mapping.Scope.Name() != meta.RESTScopeNameNamespace {
+		selectorNamespace = ""
+	} else {
+		if len(b.namespace) == 0 {
+			errMsg := "namespace may not be empty when retrieving a resource by name"
+			if b.allNamespace {
+				errMsg = "a resource cannot be retrieved by name across all namespaces"
+			}
+			return result.withError(fmt.Errorf(errMsg))
+		}
+	}
+
+	visitors := []Visitor{}
+	for _, name := range b.names {
+		info := &Info{
+			Client:    client,
+			Mapping:   mapping,
+			Namespace: selectorNamespace,
+			Name:      name,
+			Export:    b.export,
+		}
+		visitors = append(visitors, info)
+	}
+	result.visitor = VisitorList(visitors)
+	result.sources = visitors
+	return result
+}
+
+func (b *Builder) visitByPaths() *Result {
+	result := &Result{
+		singleItemImplied:  !b.dir && !b.stream && len(b.paths) == 1,
+		targetsSingleItems: true,
+	}
+
+	if len(b.resources) != 0 {
+		return result.withError(fmt.Errorf("when paths, URLs, or stdin is provided as input, you may not specify resource arguments as well"))
+	}
+	if len(b.names) != 0 {
+		return result.withError(fmt.Errorf("name cannot be provided when a path is specified"))
+	}
+	if len(b.resourceTuples) != 0 {
+		return result.withError(fmt.Errorf("resource/name arguments cannot be provided when a path is specified"))
+	}
+
+	var visitors Visitor
+	if b.continueOnError {
+		visitors = EagerVisitorList(b.paths)
+	} else {
+		visitors = VisitorList(b.paths)
+	}
+
+	if b.flatten {
+		visitors = NewFlattenListVisitor(visitors, b.objectTyper, b.mapper)
+	}
+
+	// only items from disk can be refetched
+	if b.latest {
+		// must set namespace prior to fetching
+		if b.defaultNamespace {
+			visitors = NewDecoratedVisitor(visitors, SetNamespace(b.namespace))
+		}
+		visitors = NewDecoratedVisitor(visitors, RetrieveLatest)
+	}
+	if b.labelSelector != nil {
+		selector, err := labels.Parse(*b.labelSelector)
+		if err != nil {
+			return result.withError(fmt.Errorf("the provided selector %q is not valid: %v", *b.labelSelector, err))
+		}
+		visitors = NewFilteredVisitor(visitors, FilterByLabelSelector(selector))
+	}
+	result.visitor = visitors
+	result.sources = b.paths
+	return result
+}
+
+// Do returns a Result object with a Visitor for the resources identified by the Builder.
+// The visitor will respect the error behavior specified by ContinueOnError. Note that stream
+// inputs are consumed by the first execution - use Infos() or Object() on the Result to capture a list
+// for further iteration.
+func (b *Builder) Do() *Result {
+	r := b.visitorResult()
+	r.mapper = b.Mapper()
+	if r.err != nil {
+		return r
+	}
+	if b.flatten {
+		r.visitor = NewFlattenListVisitor(r.visitor, b.objectTyper, b.mapper)
+	}
+	helpers := []VisitorFunc{}
+	if b.defaultNamespace {
+		helpers = append(helpers, SetNamespace(b.namespace))
+	}
+	if b.requireNamespace {
+		helpers = append(helpers, RequireNamespace(b.namespace))
+	}
+	helpers = append(helpers, FilterNamespace)
+	if b.requireObject {
+		helpers = append(helpers, RetrieveLazy)
+	}
+	if b.continueOnError {
+		r.visitor = NewDecoratedVisitor(ContinueOnErrorVisitor{r.visitor}, helpers...)
+	} else {
+		r.visitor = NewDecoratedVisitor(r.visitor, helpers...)
+	}
+	return r
+}
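+
+// Illustrative usage sketch (not part of the upstream file): a typical kubectl-style chain that
+// turns filename arguments into Infos. restClientGetter, namespace, and filenames are assumed to
+// be supplied by the caller, and Infos() is the Result accessor mentioned above.
+//
+//	infos, err := NewBuilder(restClientGetter).
+//		Unstructured().
+//		NamespaceParam(namespace).DefaultNamespace().
+//		FilenameParam(false, &FilenameOptions{Filenames: filenames}).
+//		Flatten().
+//		Do().
+//		Infos()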
+
+// SplitResourceArgument splits the argument with commas and returns unique
+// strings in the original order.
+func SplitResourceArgument(arg string) []string {
+	out := []string{}
+	set := sets.NewString()
+	for _, s := range strings.Split(arg, ",") {
+		if set.Has(s) {
+			continue
+		}
+		set.Insert(s)
+		out = append(out, s)
+	}
+	return out
+}
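+
+// For example (illustrative, not part of the upstream file):
+//
+//	SplitResourceArgument("pods,services,pods") // returns []string{"pods", "services"}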
+
+// HasNames returns true if the provided args contain resource names
+func HasNames(args []string) (bool, error) {
+	args = normalizeMultipleResourcesArgs(args)
+	hasCombinedTypes, err := hasCombinedTypeArgs(args)
+	if err != nil {
+		return false, err
+	}
+	return hasCombinedTypes || len(args) > 1, nil
+}
+
+type cachingRESTMapperFunc struct {
+	delegate RESTMapperFunc
+
+	lock   sync.Mutex
+	cached meta.RESTMapper
+}
+
+func (c *cachingRESTMapperFunc) ToRESTMapper() (meta.RESTMapper, error) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+	if c.cached != nil {
+		return c.cached, nil
+	}
+
+	ret, err := c.delegate()
+	if err != nil {
+		return nil, err
+	}
+	c.cached = ret
+	return c.cached, nil
+}
+
+type cachingCategoryExpanderFunc struct {
+	delegate CategoryExpanderFunc
+
+	lock   sync.Mutex
+	cached restmapper.CategoryExpander
+}
+
+func (c *cachingCategoryExpanderFunc) ToCategoryExpander() (restmapper.CategoryExpander, error) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+	if c.cached != nil {
+		return c.cached, nil
+	}
+
+	ret, err := c.delegate()
+	if err != nil {
+		return nil, err
+	}
+	c.cached = ret
+	return c.cached, nil
+}
diff --git a/vendor/k8s.io/cli-runtime/pkg/resource/client.go b/vendor/k8s.io/cli-runtime/pkg/resource/client.go
new file mode 100644
index 00000000..46380207
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/resource/client.go
@@ -0,0 +1,58 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/rest"
+)
+
+// TODO require negotiatedSerializer.  leaving it optional lets us plumb current behavior and deal with the difference after major plumbing is complete
+func (clientConfigFn ClientConfigFunc) clientForGroupVersion(gv schema.GroupVersion, negotiatedSerializer runtime.NegotiatedSerializer) (RESTClient, error) {
+	cfg, err := clientConfigFn()
+	if err != nil {
+		return nil, err
+	}
+	if negotiatedSerializer != nil {
+		cfg.ContentConfig.NegotiatedSerializer = negotiatedSerializer
+	}
+	cfg.GroupVersion = &gv
+	if len(gv.Group) == 0 {
+		cfg.APIPath = "/api"
+	} else {
+		cfg.APIPath = "/apis"
+	}
+
+	return rest.RESTClientFor(cfg)
+}
+
+func (clientConfigFn ClientConfigFunc) unstructuredClientForGroupVersion(gv schema.GroupVersion) (RESTClient, error) {
+	cfg, err := clientConfigFn()
+	if err != nil {
+		return nil, err
+	}
+	cfg.ContentConfig = UnstructuredPlusDefaultContentConfig()
+	cfg.GroupVersion = &gv
+	if len(gv.Group) == 0 {
+		cfg.APIPath = "/api"
+	} else {
+		cfg.APIPath = "/apis"
+	}
+
+	return rest.RESTClientFor(cfg)
+}
diff --git a/vendor/k8s.io/cli-runtime/pkg/resource/crd_finder.go b/vendor/k8s.io/cli-runtime/pkg/resource/crd_finder.go
new file mode 100644
index 00000000..5dab3e46
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/resource/crd_finder.go
@@ -0,0 +1,110 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/dynamic"
+)
+
+// CRDGetter is a function that can download the list of GroupKinds for all
+// CRDs.
+type CRDGetter func() ([]schema.GroupKind, error)
+
+func CRDFromDynamic(client dynamic.Interface) CRDGetter {
+	return func() ([]schema.GroupKind, error) {
+		list, err := client.Resource(schema.GroupVersionResource{
+			Group:    "apiextensions.k8s.io",
+			Version:  "v1beta1",
+			Resource: "customresourcedefinitions",
+		}).List(context.TODO(), metav1.ListOptions{})
+		if err != nil {
+			return nil, fmt.Errorf("failed to list CRDs: %v", err)
+		}
+		if list == nil {
+			return nil, nil
+		}
+
+		gks := []schema.GroupKind{}
+
+		// Parse each item in the list to extract the group and kind of the CRD.
+		for _, crd := range (*list).Items {
+			// Look up the group and kind from the CRD spec
+			group, _, _ := unstructured.NestedString(crd.Object, "spec", "group")
+			kind, _, _ := unstructured.NestedString(crd.Object, "spec", "names", "kind")
+
+			gks = append(gks, schema.GroupKind{
+				Group: group,
+				Kind:  kind,
+			})
+		}
+
+		return gks, nil
+	}
+}
+
+// CRDFinder keeps a cache of known CRDs and finds a given GroupKind in the
+// list.
+type CRDFinder interface {
+	HasCRD(gvk schema.GroupKind) (bool, error)
+}
+
+func NewCRDFinder(getter CRDGetter) CRDFinder {
+	return &crdFinder{
+		getter: getter,
+	}
+}
+
+type crdFinder struct {
+	getter CRDGetter
+	cache  *[]schema.GroupKind
+}
+
+func (f *crdFinder) cacheCRDs() error {
+	if f.cache != nil {
+		return nil
+	}
+
+	list, err := f.getter()
+	if err != nil {
+		return err
+	}
+	f.cache = &list
+	return nil
+}
+
+func (f *crdFinder) findCRD(gvk schema.GroupKind) bool {
+	for _, crd := range *f.cache {
+		if reflect.DeepEqual(gvk, crd) {
+			return true
+		}
+	}
+	return false
+}
+
+func (f *crdFinder) HasCRD(gvk schema.GroupKind) (bool, error) {
+	if err := f.cacheCRDs(); err != nil {
+		return false, err
+	}
+	return f.findCRD(gvk), nil
+}
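+
+// Illustrative usage sketch (not part of the upstream file): dynamicClient is assumed to be a
+// dynamic.Interface supplied by the caller, and the GroupKind shown is a made-up example.
+//
+//	finder := NewCRDFinder(CRDFromDynamic(dynamicClient))
+//	isCRD, err := finder.HasCRD(schema.GroupKind{Group: "example.com", Kind: "Widget"})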
diff --git a/vendor/k8s.io/cli-runtime/pkg/resource/doc.go b/vendor/k8s.io/cli-runtime/pkg/resource/doc.go
new file mode 100644
index 00000000..f83fdcbf
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/resource/doc.go
@@ -0,0 +1,24 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package resource assists clients in dealing with RESTful objects that match the
+// Kubernetes API conventions. The Helper object provides simple CRUD operations
+// on resources. The Visitor interface makes it easy to deal with multiple resources
+// in bulk for retrieval and operation. The Builder object simplifies converting
+// standard command line arguments and parameters into a Visitor that can iterate
+// over all of the identified resources, whether on the server or on the local
+// filesystem.
+package resource // import "k8s.io/cli-runtime/pkg/resource"
diff --git a/vendor/k8s.io/cli-runtime/pkg/resource/dry_run_verifier.go b/vendor/k8s.io/cli-runtime/pkg/resource/dry_run_verifier.go
new file mode 100644
index 00000000..aac123e5
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/resource/dry_run_verifier.go
@@ -0,0 +1,121 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource
+
+import (
+	"errors"
+	"fmt"
+
+	openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2"
+	yaml "gopkg.in/yaml.v2"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/discovery"
+	"k8s.io/client-go/dynamic"
+)
+
+// VerifyDryRun returns nil if a resource group-version-kind supports
+// server-side dry-run. Otherwise, an error is returned.
+func VerifyDryRun(gvk schema.GroupVersionKind, dynamicClient dynamic.Interface, discoveryClient discovery.DiscoveryInterface) error {
+	verifier := NewDryRunVerifier(dynamicClient, discoveryClient)
+	return verifier.HasSupport(gvk)
+}
+
+func NewDryRunVerifier(dynamicClient dynamic.Interface, discoveryClient discovery.DiscoveryInterface) *DryRunVerifier {
+	return &DryRunVerifier{
+		finder:        NewCRDFinder(CRDFromDynamic(dynamicClient)),
+		openAPIGetter: discoveryClient,
+	}
+}
+
+func hasGVKExtension(extensions []*openapi_v2.NamedAny, gvk schema.GroupVersionKind) bool {
+	for _, extension := range extensions {
+		if extension.GetValue().GetYaml() == "" ||
+			extension.GetName() != "x-kubernetes-group-version-kind" {
+			continue
+		}
+		var value map[string]string
+		err := yaml.Unmarshal([]byte(extension.GetValue().GetYaml()), &value)
+		if err != nil {
+			continue
+		}
+
+		if value["group"] == gvk.Group && value["kind"] == gvk.Kind && value["version"] == gvk.Version {
+			return true
+		}
+		return false
+	}
+	return false
+}
+
+// DryRunVerifier verifies if a given group-version-kind supports DryRun
+// against the current server. Sending dryRun requests to an apiserver that
+// doesn't support it will result in objects being unintentionally persisted.
+//
+// It reads the OpenAPI to see if the given GVK supports dryRun. If the
+// GVK can not be found, we assume that CRDs will have the same level of
+// support as "namespaces", and non-CRDs will not be supported. We
+// delay the check for CRDs as much as possible though, since it
+// requires an extra round-trip to the server.
+type DryRunVerifier struct {
+	finder        CRDFinder
+	openAPIGetter discovery.OpenAPISchemaInterface
+}
+
+// HasSupport verifies if the given gvk supports DryRun. An error is
+// returned if it doesn't.
+func (v *DryRunVerifier) HasSupport(gvk schema.GroupVersionKind) error {
+	oapi, err := v.openAPIGetter.OpenAPISchema()
+	if err != nil {
+		return fmt.Errorf("failed to download openapi: %v", err)
+	}
+	supports, err := supportsDryRun(oapi, gvk)
+	if err != nil {
+		// Assume the type could not be found in the OpenAPI document, then check whether namespaces support dry-run:
+		supports, _ = supportsDryRun(oapi, schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Namespace"})
+		// If namespace supports dryRun, then we will support dryRun for CRDs only.
+		if supports {
+			supports, err = v.finder.HasCRD(gvk.GroupKind())
+			if err != nil {
+				return fmt.Errorf("failed to check CRD: %v", err)
+			}
+		}
+	}
+	if !supports {
+		return fmt.Errorf("%v doesn't support dry-run", gvk)
+	}
+	return nil
+}
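+
+// Illustrative usage sketch (not part of the upstream file): dynamicClient, discoveryClient, and
+// gvk are assumed to come from the caller.
+//
+//	verifier := NewDryRunVerifier(dynamicClient, discoveryClient)
+//	if err := verifier.HasSupport(gvk); err != nil {
+//		// fall back to client-side behavior; the server cannot dry-run this GVK
+//	}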
+
+// supportsDryRun looks in the OpenAPI document to check whether the
+// specific group-version-kind supports the dryRun query parameter for
+// the PATCH endpoint.
+func supportsDryRun(doc *openapi_v2.Document, gvk schema.GroupVersionKind) (bool, error) {
+	for _, path := range doc.GetPaths().GetPath() {
+		// Is this describing the gvk we're looking for?
+		if !hasGVKExtension(path.GetValue().GetPatch().GetVendorExtension(), gvk) {
+			continue
+		}
+		for _, param := range path.GetValue().GetPatch().GetParameters() {
+			if param.GetParameter().GetNonBodyParameter().GetQueryParameterSubSchema().GetName() == "dryRun" {
+				return true, nil
+			}
+		}
+		return false, nil
+	}
+
+	return false, errors.New("couldn't find GVK in openapi")
+}
diff --git a/vendor/k8s.io/cli-runtime/pkg/resource/fake.go b/vendor/k8s.io/cli-runtime/pkg/resource/fake.go
new file mode 100644
index 00000000..276c343e
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/resource/fake.go
@@ -0,0 +1,40 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource
+
+import (
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/restmapper"
+)
+
+// FakeCategoryExpander is for testing only
+var FakeCategoryExpander restmapper.CategoryExpander = restmapper.SimpleCategoryExpander{
+	Expansions: map[string][]schema.GroupResource{
+		"all": {
+			{Group: "", Resource: "pods"},
+			{Group: "", Resource: "replicationcontrollers"},
+			{Group: "", Resource: "services"},
+			{Group: "apps", Resource: "statefulsets"},
+			{Group: "autoscaling", Resource: "horizontalpodautoscalers"},
+			{Group: "batch", Resource: "jobs"},
+			{Group: "batch", Resource: "cronjobs"},
+			{Group: "extensions", Resource: "daemonsets"},
+			{Group: "extensions", Resource: "deployments"},
+			{Group: "extensions", Resource: "replicasets"},
+		},
+	},
+}
diff --git a/vendor/k8s.io/cli-runtime/pkg/resource/helper.go b/vendor/k8s.io/cli-runtime/pkg/resource/helper.go
new file mode 100644
index 00000000..beebd805
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/resource/helper.go
@@ -0,0 +1,228 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource
+
+import (
+	"context"
+	"strconv"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/fields"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/watch"
+)
+
+var metadataAccessor = meta.NewAccessor()
+
+// Helper provides methods for retrieving or mutating a RESTful
+// resource.
+type Helper struct {
+	// The name of this resource as the server would recognize it
+	Resource string
+	// A RESTClient capable of mutating this resource.
+	RESTClient RESTClient
+	// True if the resource type is scoped to namespaces
+	NamespaceScoped bool
+	// If true, then use server-side dry-run to not persist changes to storage
+	// for verbs and resources that support server-side dry-run.
+	//
+	// Note this should only be used against an apiserver with dry-run enabled,
+	// and on resources that support dry-run. If the apiserver or the resource
+	// does not support dry-run, then the change will be persisted to storage.
+	ServerDryRun bool
+}
+
+// NewHelper creates a Helper from a RESTMapping
+func NewHelper(client RESTClient, mapping *meta.RESTMapping) *Helper {
+	return &Helper{
+		Resource:        mapping.Resource.Resource,
+		RESTClient:      client,
+		NamespaceScoped: mapping.Scope.Name() == meta.RESTScopeNameNamespace,
+	}
+}
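+
+// Illustrative usage sketch (not part of the upstream file): info is assumed to be a *Info
+// produced by a Builder result.
+//
+//	helper := NewHelper(info.Client, info.Mapping)
+//	obj, err := helper.Get(info.Namespace, info.Name, false)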
+
+// DryRun enables server-side dry-run when dryRun is true, so that changes are not persisted to storage.
+// Otherwise, changes will be persisted to storage.
+func (m *Helper) DryRun(dryRun bool) *Helper {
+	m.ServerDryRun = dryRun
+	return m
+}
+
+func (m *Helper) Get(namespace, name string, export bool) (runtime.Object, error) {
+	req := m.RESTClient.Get().
+		NamespaceIfScoped(namespace, m.NamespaceScoped).
+		Resource(m.Resource).
+		Name(name)
+	if export {
+		// TODO: I should be part of GetOptions
+		req.Param("export", strconv.FormatBool(export))
+	}
+	return req.Do(context.TODO()).Get()
+}
+
+func (m *Helper) List(namespace, apiVersion string, export bool, options *metav1.ListOptions) (runtime.Object, error) {
+	req := m.RESTClient.Get().
+		NamespaceIfScoped(namespace, m.NamespaceScoped).
+		Resource(m.Resource).
+		VersionedParams(options, metav1.ParameterCodec)
+	if export {
+		// TODO: I should be part of ListOptions
+		req.Param("export", strconv.FormatBool(export))
+	}
+	return req.Do(context.TODO()).Get()
+}
+
+func (m *Helper) Watch(namespace, apiVersion string, options *metav1.ListOptions) (watch.Interface, error) {
+	options.Watch = true
+	return m.RESTClient.Get().
+		NamespaceIfScoped(namespace, m.NamespaceScoped).
+		Resource(m.Resource).
+		VersionedParams(options, metav1.ParameterCodec).
+		Watch(context.TODO())
+}
+
+func (m *Helper) WatchSingle(namespace, name, resourceVersion string) (watch.Interface, error) {
+	return m.RESTClient.Get().
+		NamespaceIfScoped(namespace, m.NamespaceScoped).
+		Resource(m.Resource).
+		VersionedParams(&metav1.ListOptions{
+			ResourceVersion: resourceVersion,
+			Watch:           true,
+			FieldSelector:   fields.OneTermEqualSelector("metadata.name", name).String(),
+		}, metav1.ParameterCodec).
+		Watch(context.TODO())
+}
+
+func (m *Helper) Delete(namespace, name string) (runtime.Object, error) {
+	return m.DeleteWithOptions(namespace, name, nil)
+}
+
+func (m *Helper) DeleteWithOptions(namespace, name string, options *metav1.DeleteOptions) (runtime.Object, error) {
+	if options == nil {
+		options = &metav1.DeleteOptions{}
+	}
+	if m.ServerDryRun {
+		options.DryRun = []string{metav1.DryRunAll}
+	}
+
+	return m.RESTClient.Delete().
+		NamespaceIfScoped(namespace, m.NamespaceScoped).
+		Resource(m.Resource).
+		Name(name).
+		Body(options).
+		Do(context.TODO()).
+		Get()
+}
+
+func (m *Helper) Create(namespace string, modify bool, obj runtime.Object) (runtime.Object, error) {
+	return m.CreateWithOptions(namespace, modify, obj, nil)
+}
+
+func (m *Helper) CreateWithOptions(namespace string, modify bool, obj runtime.Object, options *metav1.CreateOptions) (runtime.Object, error) {
+	if options == nil {
+		options = &metav1.CreateOptions{}
+	}
+	if m.ServerDryRun {
+		options.DryRun = []string{metav1.DryRunAll}
+	}
+	if modify {
+		// Attempt to version the object based on client logic.
+		version, err := metadataAccessor.ResourceVersion(obj)
+		if err != nil {
+			// We don't know how to clear the version on this object, so send it to the server as is
+			return m.createResource(m.RESTClient, m.Resource, namespace, obj, options)
+		}
+		if version != "" {
+			if err := metadataAccessor.SetResourceVersion(obj, ""); err != nil {
+				return nil, err
+			}
+		}
+	}
+
+	return m.createResource(m.RESTClient, m.Resource, namespace, obj, options)
+}
+
+func (m *Helper) createResource(c RESTClient, resource, namespace string, obj runtime.Object, options *metav1.CreateOptions) (runtime.Object, error) {
+	return c.Post().
+		NamespaceIfScoped(namespace, m.NamespaceScoped).
+		Resource(resource).
+		VersionedParams(options, metav1.ParameterCodec).
+		Body(obj).
+		Do(context.TODO()).
+		Get()
+}
+func (m *Helper) Patch(namespace, name string, pt types.PatchType, data []byte, options *metav1.PatchOptions) (runtime.Object, error) {
+	if options == nil {
+		options = &metav1.PatchOptions{}
+	}
+	if m.ServerDryRun {
+		options.DryRun = []string{metav1.DryRunAll}
+	}
+	return m.RESTClient.Patch(pt).
+		NamespaceIfScoped(namespace, m.NamespaceScoped).
+		Resource(m.Resource).
+		Name(name).
+		VersionedParams(options, metav1.ParameterCodec).
+		Body(data).
+		Do(context.TODO()).
+		Get()
+}
+
+func (m *Helper) Replace(namespace, name string, overwrite bool, obj runtime.Object) (runtime.Object, error) {
+	c := m.RESTClient
+	var options = &metav1.UpdateOptions{}
+	if m.ServerDryRun {
+		options.DryRun = []string{metav1.DryRunAll}
+	}
+
+	// Attempt to version the object based on client logic.
+	version, err := metadataAccessor.ResourceVersion(obj)
+	if err != nil {
+		// We don't know how to version this object, so send it to the server as is
+		return m.replaceResource(c, m.Resource, namespace, name, obj, options)
+	}
+	if version == "" && overwrite {
+		// Retrieve the current version of the object to overwrite the server object
+		serverObj, err := c.Get().NamespaceIfScoped(namespace, m.NamespaceScoped).Resource(m.Resource).Name(name).Do(context.TODO()).Get()
+		if err != nil {
+			// The object does not exist, but we want it to be created
+			return m.replaceResource(c, m.Resource, namespace, name, obj, options)
+		}
+		serverVersion, err := metadataAccessor.ResourceVersion(serverObj)
+		if err != nil {
+			return nil, err
+		}
+		if err := metadataAccessor.SetResourceVersion(obj, serverVersion); err != nil {
+			return nil, err
+		}
+	}
+
+	return m.replaceResource(c, m.Resource, namespace, name, obj, options)
+}
+
+func (m *Helper) replaceResource(c RESTClient, resource, namespace, name string, obj runtime.Object, options *metav1.UpdateOptions) (runtime.Object, error) {
+	return c.Put().
+		NamespaceIfScoped(namespace, m.NamespaceScoped).
+		Resource(resource).
+		Name(name).
+		VersionedParams(options, metav1.ParameterCodec).
+		Body(obj).
+		Do(context.TODO()).
+		Get()
+}
diff --git a/vendor/k8s.io/cli-runtime/pkg/resource/interfaces.go b/vendor/k8s.io/cli-runtime/pkg/resource/interfaces.go
new file mode 100644
index 00000000..29d7b34a
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/resource/interfaces.go
@@ -0,0 +1,103 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource
+
+import (
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/client-go/discovery"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/restmapper"
+)
+
+type RESTClientGetter interface {
+	ToRESTConfig() (*rest.Config, error)
+	ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error)
+	ToRESTMapper() (meta.RESTMapper, error)
+}
+
+type ClientConfigFunc func() (*rest.Config, error)
+type RESTMapperFunc func() (meta.RESTMapper, error)
+type CategoryExpanderFunc func() (restmapper.CategoryExpander, error)
+
+// RESTClient is a client helper for dealing with RESTful resources
+// in a generic way.
+type RESTClient interface {
+	Get() *rest.Request
+	Post() *rest.Request
+	Patch(types.PatchType) *rest.Request
+	Delete() *rest.Request
+	Put() *rest.Request
+}
+
+// RequestTransform is a function that is given a chance to modify the outgoing request.
+type RequestTransform func(*rest.Request)
+
+// NewClientWithOptions wraps the provided RESTClient and invokes each transform on each
+// newly created request.
+func NewClientWithOptions(c RESTClient, transforms ...RequestTransform) RESTClient {
+	if len(transforms) == 0 {
+		return c
+	}
+	return &clientOptions{c: c, transforms: transforms}
+}
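+
+// Example (illustrative sketch only, not part of the upstream API surface): a
+// RequestTransform can decorate every request issued through the wrapped
+// client, e.g. to apply a client-side timeout. The names below are assumed.
+//
+//	c := NewClientWithOptions(restClient, func(req *rest.Request) {
+//		req.Timeout(30 * time.Second)
+//	})
+//	// every request created via c.Get(), c.Post(), etc. now carries the timeout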
+
+type clientOptions struct {
+	c          RESTClient
+	transforms []RequestTransform
+}
+
+func (c *clientOptions) modify(req *rest.Request) *rest.Request {
+	for _, transform := range c.transforms {
+		transform(req)
+	}
+	return req
+}
+
+func (c *clientOptions) Get() *rest.Request {
+	return c.modify(c.c.Get())
+}
+
+func (c *clientOptions) Post() *rest.Request {
+	return c.modify(c.c.Post())
+}
+
+func (c *clientOptions) Patch(t types.PatchType) *rest.Request {
+	return c.modify(c.c.Patch(t))
+}
+
+func (c *clientOptions) Delete() *rest.Request {
+	return c.modify(c.c.Delete())
+}
+
+func (c *clientOptions) Put() *rest.Request {
+	return c.modify(c.c.Put())
+}
+
+// ContentValidator is an interface that knows how to validate an API object serialized to a byte array.
+type ContentValidator interface {
+	ValidateBytes(data []byte) error
+}
+
+// Visitor lets clients walk a list of resources.
+type Visitor interface {
+	Visit(VisitorFunc) error
+}
+
+// VisitorFunc implements the Visitor interface for a matching function.
+// If there was a problem walking a list of resources, the incoming error
+// will describe the problem and the function can decide how to handle that error.
+// A nil returned indicates to accept an error to continue loops even when errors happen.
+// This is useful for ignoring certain kinds of errors or aggregating errors in some way.
+type VisitorFunc func(*Info, error) error
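+
+// Example (sketch, not from upstream): a VisitorFunc that prints each resource
+// name and skips objects that failed to load instead of aborting the walk.
+//
+//	printNames := VisitorFunc(func(info *Info, err error) error {
+//		if err != nil {
+//			return nil // accept the error and keep visiting
+//		}
+//		fmt.Println(info.Name)
+//		return nil
+//	})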
diff --git a/vendor/k8s.io/cli-runtime/pkg/resource/mapper.go b/vendor/k8s.io/cli-runtime/pkg/resource/mapper.go
new file mode 100644
index 00000000..962f3771
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/resource/mapper.go
@@ -0,0 +1,161 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource
+
+import (
+	"fmt"
+	"reflect"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// mapper is a convenience struct for holding references to the interfaces
+// needed to create Info for arbitrary objects.
+type mapper struct {
+	// localFn indicates the call can't make server requests
+	localFn func() bool
+
+	restMapperFn RESTMapperFunc
+	clientFn     func(version schema.GroupVersion) (RESTClient, error)
+	decoder      runtime.Decoder
+}
+
+// infoForData creates an Info object for the given data. An error is returned
+// if any of the decoding or client lookup steps fail. Name and namespace will be
+// set into Info if the mapping's MetadataAccessor can retrieve them.
+func (m *mapper) infoForData(data []byte, source string) (*Info, error) {
+	obj, gvk, err := m.decoder.Decode(data, nil, nil)
+	if err != nil {
+		return nil, fmt.Errorf("unable to decode %q: %v", source, err)
+	}
+
+	name, _ := metadataAccessor.Name(obj)
+	namespace, _ := metadataAccessor.Namespace(obj)
+	resourceVersion, _ := metadataAccessor.ResourceVersion(obj)
+
+	ret := &Info{
+		Source:          source,
+		Namespace:       namespace,
+		Name:            name,
+		ResourceVersion: resourceVersion,
+
+		Object: obj,
+	}
+
+	if m.localFn == nil || !m.localFn() {
+		restMapper, err := m.restMapperFn()
+		if err != nil {
+			return nil, err
+		}
+		mapping, err := restMapper.RESTMapping(gvk.GroupKind(), gvk.Version)
+		if err != nil {
+			return nil, fmt.Errorf("unable to recognize %q: %v", source, err)
+		}
+		ret.Mapping = mapping
+
+		client, err := m.clientFn(gvk.GroupVersion())
+		if err != nil {
+			return nil, fmt.Errorf("unable to connect to a server to handle %q: %v", mapping.Resource, err)
+		}
+		ret.Client = client
+	}
+
+	return ret, nil
+}
+
+// infoForObject creates an Info object for the given Object. An error is returned
+// if the object cannot be introspected. Name and namespace will be set into Info
+// if the mapping's MetadataAccessor can retrieve them.
+func (m *mapper) infoForObject(obj runtime.Object, typer runtime.ObjectTyper, preferredGVKs []schema.GroupVersionKind) (*Info, error) {
+	groupVersionKinds, _, err := typer.ObjectKinds(obj)
+	if err != nil {
+		return nil, fmt.Errorf("unable to get type info from the object %q: %v", reflect.TypeOf(obj), err)
+	}
+
+	gvk := groupVersionKinds[0]
+	if len(groupVersionKinds) > 1 && len(preferredGVKs) > 0 {
+		gvk = preferredObjectKind(groupVersionKinds, preferredGVKs)
+	}
+
+	name, _ := metadataAccessor.Name(obj)
+	namespace, _ := metadataAccessor.Namespace(obj)
+	resourceVersion, _ := metadataAccessor.ResourceVersion(obj)
+	ret := &Info{
+		Namespace:       namespace,
+		Name:            name,
+		ResourceVersion: resourceVersion,
+
+		Object: obj,
+	}
+
+	if m.localFn == nil || !m.localFn() {
+		restMapper, err := m.restMapperFn()
+		if err != nil {
+			return nil, err
+		}
+		mapping, err := restMapper.RESTMapping(gvk.GroupKind(), gvk.Version)
+		if err != nil {
+			return nil, fmt.Errorf("unable to recognize %v", err)
+		}
+		ret.Mapping = mapping
+
+		client, err := m.clientFn(gvk.GroupVersion())
+		if err != nil {
+			return nil, fmt.Errorf("unable to connect to a server to handle %q: %v", mapping.Resource, err)
+		}
+		ret.Client = client
+	}
+
+	return ret, nil
+}
+
+// preferredObjectKind picks the possibility that most closely matches the priority list in this order:
+// GroupVersionKind matches (exact match)
+// GroupKind matches
+// Group matches
+func preferredObjectKind(possibilities []schema.GroupVersionKind, preferences []schema.GroupVersionKind) schema.GroupVersionKind {
+	// Exact match
+	for _, priority := range preferences {
+		for _, possibility := range possibilities {
+			if possibility == priority {
+				return possibility
+			}
+		}
+	}
+
+	// GroupKind match
+	for _, priority := range preferences {
+		for _, possibility := range possibilities {
+			if possibility.GroupKind() == priority.GroupKind() {
+				return possibility
+			}
+		}
+	}
+
+	// Group match
+	for _, priority := range preferences {
+		for _, possibility := range possibilities {
+			if possibility.Group == priority.Group {
+				return possibility
+			}
+		}
+	}
+
+	// Just pick the first
+	return possibilities[0]
+}
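+
+// Example (sketch): preference resolution falls back from exact GroupVersionKind
+// to GroupKind to Group before defaulting to the first possibility. The values
+// below are illustrative.
+//
+//	possibilities := []schema.GroupVersionKind{
+//		{Group: "apps", Version: "v1beta1", Kind: "Deployment"},
+//		{Group: "apps", Version: "v1", Kind: "Deployment"},
+//	}
+//	preferences := []schema.GroupVersionKind{{Group: "apps", Version: "v1", Kind: "Deployment"}}
+//	gvk := preferredObjectKind(possibilities, preferences) // apps/v1 Deployment (exact match)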
diff --git a/vendor/k8s.io/cli-runtime/pkg/resource/metadata_decoder.go b/vendor/k8s.io/cli-runtime/pkg/resource/metadata_decoder.go
new file mode 100644
index 00000000..c79c6b5e
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/resource/metadata_decoder.go
@@ -0,0 +1,59 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/runtime/serializer/json"
+)
+
+// hold a single instance of the case-sensitive decoder
+var caseSensitiveJsonIterator = json.CaseSensitiveJsonIterator()
+
+// metadataValidatingDecoder wraps a decoder and additionally ensures metadata schema fields decode before returning an unstructured object
+type metadataValidatingDecoder struct {
+	decoder runtime.Decoder
+}
+
+func (m *metadataValidatingDecoder) Decode(data []byte, defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
+	obj, gvk, err := m.decoder.Decode(data, defaults, into)
+
+	// if we already errored, return
+	if err != nil {
+		return obj, gvk, err
+	}
+
+	// if we're not unstructured, return
+	if _, isUnstructured := obj.(runtime.Unstructured); !isUnstructured {
+		return obj, gvk, err
+	}
+
+	// make sure the data can decode into ObjectMeta before we return,
+	// so we don't silently truncate schema errors in metadata later with accessor get/set calls
+	v := &metadataOnlyObject{}
+	if typedErr := caseSensitiveJsonIterator.Unmarshal(data, v); typedErr != nil {
+		return obj, gvk, typedErr
+	}
+	return obj, gvk, err
+}
+
+type metadataOnlyObject struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+}
diff --git a/vendor/k8s.io/cli-runtime/pkg/resource/result.go b/vendor/k8s.io/cli-runtime/pkg/resource/result.go
new file mode 100644
index 00000000..b8722afe
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/resource/result.go
@@ -0,0 +1,242 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource
+
+import (
+	"fmt"
+	"reflect"
+
+	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/apimachinery/pkg/watch"
+)
+
+// ErrMatchFunc can be used to filter errors that may not be true failures.
+type ErrMatchFunc func(error) bool
+
+// Result contains helper methods for dealing with the outcome of a Builder.
+type Result struct {
+	err     error
+	visitor Visitor
+
+	sources            []Visitor
+	singleItemImplied  bool
+	targetsSingleItems bool
+
+	mapper       *mapper
+	ignoreErrors []utilerrors.Matcher
+
+	// populated by a call to Infos
+	info []*Info
+}
+
+// withError allows a fluent style for internal result code.
+func (r *Result) withError(err error) *Result {
+	r.err = err
+	return r
+}
+
+// TargetsSingleItems returns true if any of the builder arguments pointed
+// to non-list calls (if the user explicitly asked for any object by name).
+// This includes directories, streams, URLs, and resource name tuples.
+func (r *Result) TargetsSingleItems() bool {
+	return r.targetsSingleItems
+}
+
+// IgnoreErrors will filter errors that occur when visiting the result
+// (but not errors that occur by creating the result in the first place),
+// eliminating any that match fns. This is best used in combination with
+// Builder.ContinueOnError(), where the visitors accumulate errors and return
+// them after visiting as a slice of errors. If no errors remain after
+// filtering, the various visitor methods on Result will return nil for
+// err.
+func (r *Result) IgnoreErrors(fns ...ErrMatchFunc) *Result {
+	for _, fn := range fns {
+		r.ignoreErrors = append(r.ignoreErrors, utilerrors.Matcher(fn))
+	}
+	return r
+}
+
+// Mapper returns the builder's mapper.
+func (r *Result) Mapper() *mapper {
+	return r.mapper
+}
+
+// Err returns one or more errors (via a util.ErrorList) that occurred prior
+// to visiting the elements in the visitor. To see all errors including those
+// that occur during visitation, invoke Infos().
+func (r *Result) Err() error {
+	return r.err
+}
+
+// Visit implements the Visitor interface on the items described in the Builder.
+// Note that some visitor sources are not traversable more than once, or may
+// return different results.  If you wish to operate on the same set of resources
+// multiple times, use the Infos() method.
+func (r *Result) Visit(fn VisitorFunc) error {
+	if r.err != nil {
+		return r.err
+	}
+	err := r.visitor.Visit(fn)
+	return utilerrors.FilterOut(err, r.ignoreErrors...)
+}
+
+// IntoSingleItemImplied sets the provided boolean pointer to true if the Builder input
+// implied a single item, and to false if it implied multiple items.
+func (r *Result) IntoSingleItemImplied(b *bool) *Result {
+	*b = r.singleItemImplied
+	return r
+}
+
+// Infos returns an array of all of the resource infos retrieved via traversal.
+// Will attempt to traverse the entire set of visitors only once, and will return
+// a cached list on subsequent calls.
+func (r *Result) Infos() ([]*Info, error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+	if r.info != nil {
+		return r.info, nil
+	}
+
+	infos := []*Info{}
+	err := r.visitor.Visit(func(info *Info, err error) error {
+		if err != nil {
+			return err
+		}
+		infos = append(infos, info)
+		return nil
+	})
+	err = utilerrors.FilterOut(err, r.ignoreErrors...)
+
+	r.info, r.err = infos, err
+	return infos, err
+}
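+
+// Example (sketch; assumes a *Result named result obtained elsewhere, e.g. from
+// a Builder): Visit streams items once, while Infos materializes and caches them
+// so they can be iterated repeatedly.
+//
+//	if err := result.Visit(func(info *Info, err error) error {
+//		if err != nil {
+//			return err
+//		}
+//		fmt.Printf("visited %s\n", info.ObjectName())
+//		return nil
+//	}); err != nil {
+//		return err
+//	}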
+
+// Object returns a single object representing the output of a single visit to all
+// found resources.  If the Builder was a singular context (expected to return a
+// single resource by user input) and only a single resource was found, the resource
+// will be returned as is.  Otherwise, the returned resources will be part of an
+// v1.List. The ResourceVersion of the v1.List will be set only if it is identical
+// across all infos returned.
+func (r *Result) Object() (runtime.Object, error) {
+	infos, err := r.Infos()
+	if err != nil {
+		return nil, err
+	}
+
+	versions := sets.String{}
+	objects := []runtime.Object{}
+	for _, info := range infos {
+		if info.Object != nil {
+			objects = append(objects, info.Object)
+			versions.Insert(info.ResourceVersion)
+		}
+	}
+
+	if len(objects) == 1 {
+		if r.singleItemImplied {
+			return objects[0], nil
+		}
+		// if the item is a list already, don't create another list
+		if meta.IsListType(objects[0]) {
+			return objects[0], nil
+		}
+	}
+
+	version := ""
+	if len(versions) == 1 {
+		version = versions.List()[0]
+	}
+
+	return toV1List(objects, version), err
+}
+
+// Compile time check to enforce that list implements the necessary interface
+var _ metav1.ListInterface = &v1.List{}
+var _ metav1.ListMetaAccessor = &v1.List{}
+
+// toV1List takes a slice of Objects + their version, and returns
+// a v1.List Object containing the objects in the Items field
+func toV1List(objects []runtime.Object, version string) runtime.Object {
+	raw := []runtime.RawExtension{}
+	for _, o := range objects {
+		raw = append(raw, runtime.RawExtension{Object: o})
+	}
+	return &v1.List{
+		ListMeta: metav1.ListMeta{
+			ResourceVersion: version,
+		},
+		Items: raw,
+	}
+}
+
+// ResourceMapping returns a single meta.RESTMapping representing the
+// resources located by the builder, or an error if more than one
+// mapping was found.
+func (r *Result) ResourceMapping() (*meta.RESTMapping, error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+	mappings := map[schema.GroupVersionResource]*meta.RESTMapping{}
+	for i := range r.sources {
+		m, ok := r.sources[i].(ResourceMapping)
+		if !ok {
+			return nil, fmt.Errorf("a resource mapping could not be loaded from %v", reflect.TypeOf(r.sources[i]))
+		}
+		mapping := m.ResourceMapping()
+		mappings[mapping.Resource] = mapping
+	}
+	if len(mappings) != 1 {
+		return nil, fmt.Errorf("expected only a single resource type")
+	}
+	for _, mapping := range mappings {
+		return mapping, nil
+	}
+	return nil, nil
+}
+
+// Watch retrieves changes that occur on the server to the specified resource.
+// It currently supports watching a single source - if the resource source
+// (selectors or pure types) can be watched, they will be, otherwise the list
+// will be visited (equivalent to the Infos() call) and if there is a single
+// resource present, it will be watched, otherwise an error will be returned.
+func (r *Result) Watch(resourceVersion string) (watch.Interface, error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+	if len(r.sources) != 1 {
+		return nil, fmt.Errorf("you may only watch a single resource or type of resource at a time")
+	}
+	w, ok := r.sources[0].(Watchable)
+	if !ok {
+		info, err := r.Infos()
+		if err != nil {
+			return nil, err
+		}
+		if len(info) != 1 {
+			return nil, fmt.Errorf("watch is only supported on individual resources and resource collections - %d resources were found", len(info))
+		}
+		return info[0].Watch(resourceVersion)
+	}
+	return w.Watch(resourceVersion)
+}
diff --git a/vendor/k8s.io/cli-runtime/pkg/resource/scheme.go b/vendor/k8s.io/cli-runtime/pkg/resource/scheme.go
new file mode 100644
index 00000000..0a47d159
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/resource/scheme.go
@@ -0,0 +1,82 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource
+
+import (
+	"encoding/json"
+	"io"
+	"strings"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/client-go/rest"
+)
+
+// dynamicCodec is a codec that wraps the standard unstructured codec
+// with special handling for Status objects.
+// Deprecated: only used by test code, and it is wrong.
+type dynamicCodec struct{}
+
+func (dynamicCodec) Decode(data []byte, gvk *schema.GroupVersionKind, obj runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
+	obj, gvk, err := unstructured.UnstructuredJSONScheme.Decode(data, gvk, obj)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if strings.ToLower(gvk.Kind) == "status" && gvk.Version == "v1" && (gvk.Group == "" || gvk.Group == "meta.k8s.io") {
+		if _, ok := obj.(*metav1.Status); !ok {
+			obj = &metav1.Status{}
+			err := json.Unmarshal(data, obj)
+			if err != nil {
+				return nil, nil, err
+			}
+		}
+	}
+
+	return obj, gvk, nil
+}
+
+func (dynamicCodec) Encode(obj runtime.Object, w io.Writer) error {
+	// There is no need to handle runtime.CacheableObject, as we only
+	// fallback to other encoders here.
+	return unstructured.UnstructuredJSONScheme.Encode(obj, w)
+}
+
+// Identifier implements runtime.Encoder interface.
+func (dynamicCodec) Identifier() runtime.Identifier {
+	return unstructured.UnstructuredJSONScheme.Identifier()
+}
+
+// UnstructuredPlusDefaultContentConfig returns a rest.ContentConfig for dynamic types.  It includes enough codecs to act as a "normal"
+// serializer for the rest.client with options, status and the like.
+func UnstructuredPlusDefaultContentConfig() rest.ContentConfig {
+	// TODO: scheme.Codecs here should become "pkg/apis/server/scheme" which is the minimal core you need
+	// to talk to a kubernetes server
+	jsonInfo, _ := runtime.SerializerInfoForMediaType(scheme.Codecs.SupportedMediaTypes(), runtime.ContentTypeJSON)
+
+	jsonInfo.Serializer = dynamicCodec{}
+	jsonInfo.PrettySerializer = nil
+	return rest.ContentConfig{
+		AcceptContentTypes:   runtime.ContentTypeJSON,
+		ContentType:          runtime.ContentTypeJSON,
+		NegotiatedSerializer: serializer.NegotiatedSerializerWrapper(jsonInfo),
+	}
+}
diff --git a/vendor/k8s.io/cli-runtime/pkg/resource/selector.go b/vendor/k8s.io/cli-runtime/pkg/resource/selector.go
new file mode 100644
index 00000000..0c0bdbec
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/resource/selector.go
@@ -0,0 +1,118 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource
+
+import (
+	"fmt"
+
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/watch"
+)
+
+// Selector is a Visitor for resources that match a label selector.
+type Selector struct {
+	Client        RESTClient
+	Mapping       *meta.RESTMapping
+	Namespace     string
+	LabelSelector string
+	FieldSelector string
+	Export        bool
+	LimitChunks   int64
+}
+
+// NewSelector creates a resource selector which hides details of getting items by their label selector.
+func NewSelector(client RESTClient, mapping *meta.RESTMapping, namespace, labelSelector, fieldSelector string, export bool, limitChunks int64) *Selector {
+	return &Selector{
+		Client:        client,
+		Mapping:       mapping,
+		Namespace:     namespace,
+		LabelSelector: labelSelector,
+		FieldSelector: fieldSelector,
+		Export:        export,
+		LimitChunks:   limitChunks,
+	}
+}
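+
+// Example (sketch; client and mapping are assumed to come from a
+// RESTClientGetter): list objects in "default" matching a label selector,
+// retrieving at most 500 objects per chunk.
+//
+//	sel := NewSelector(client, mapping, "default", "app=nginx", "", false, 500)
+//	err := sel.Visit(func(info *Info, err error) error {
+//		if err != nil {
+//			return err
+//		}
+//		// info.Object holds one chunk of the list
+//		return nil
+//	})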
+
+// Visit implements Visitor and uses request chunking by default.
+func (r *Selector) Visit(fn VisitorFunc) error {
+	var continueToken string
+	for {
+		list, err := NewHelper(r.Client, r.Mapping).List(
+			r.Namespace,
+			r.ResourceMapping().GroupVersionKind.GroupVersion().String(),
+			r.Export,
+			&metav1.ListOptions{
+				LabelSelector: r.LabelSelector,
+				FieldSelector: r.FieldSelector,
+				Limit:         r.LimitChunks,
+				Continue:      continueToken,
+			},
+		)
+		if err != nil {
+			if errors.IsResourceExpired(err) {
+				return err
+			}
+			if errors.IsBadRequest(err) || errors.IsNotFound(err) {
+				if se, ok := err.(*errors.StatusError); ok {
+					// modify the message without hiding this is an API error
+					if len(r.LabelSelector) == 0 && len(r.FieldSelector) == 0 {
+						se.ErrStatus.Message = fmt.Sprintf("Unable to list %q: %v", r.Mapping.Resource, se.ErrStatus.Message)
+					} else {
+						se.ErrStatus.Message = fmt.Sprintf("Unable to find %q that match label selector %q, field selector %q: %v", r.Mapping.Resource, r.LabelSelector, r.FieldSelector, se.ErrStatus.Message)
+					}
+					return se
+				}
+				if len(r.LabelSelector) == 0 && len(r.FieldSelector) == 0 {
+					return fmt.Errorf("Unable to list %q: %v", r.Mapping.Resource, err)
+				}
+				return fmt.Errorf("Unable to find %q that match label selector %q, field selector %q: %v", r.Mapping.Resource, r.LabelSelector, r.FieldSelector, err)
+			}
+			return err
+		}
+		resourceVersion, _ := metadataAccessor.ResourceVersion(list)
+		nextContinueToken, _ := metadataAccessor.Continue(list)
+		info := &Info{
+			Client:  r.Client,
+			Mapping: r.Mapping,
+
+			Namespace:       r.Namespace,
+			ResourceVersion: resourceVersion,
+
+			Object: list,
+		}
+
+		if err := fn(info, nil); err != nil {
+			return err
+		}
+		if len(nextContinueToken) == 0 {
+			return nil
+		}
+		continueToken = nextContinueToken
+	}
+}
+
+func (r *Selector) Watch(resourceVersion string) (watch.Interface, error) {
+	return NewHelper(r.Client, r.Mapping).Watch(r.Namespace, r.ResourceMapping().GroupVersionKind.GroupVersion().String(),
+		&metav1.ListOptions{ResourceVersion: resourceVersion, LabelSelector: r.LabelSelector, FieldSelector: r.FieldSelector})
+}
+
+// ResourceMapping returns the mapping for this resource and implements ResourceMapping
+func (r *Selector) ResourceMapping() *meta.RESTMapping {
+	return r.Mapping
+}
diff --git a/vendor/k8s.io/cli-runtime/pkg/resource/visitor.go b/vendor/k8s.io/cli-runtime/pkg/resource/visitor.go
new file mode 100644
index 00000000..54fbd4c7
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/pkg/resource/visitor.go
@@ -0,0 +1,764 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"golang.org/x/text/encoding/unicode"
+	"golang.org/x/text/transform"
+
+	"sigs.k8s.io/kustomize/pkg/fs"
+
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/apimachinery/pkg/util/yaml"
+	"k8s.io/apimachinery/pkg/watch"
+	"k8s.io/cli-runtime/pkg/kustomize"
+)
+
+const (
+	constSTDINstr       = "STDIN"
+	stopValidateMessage = "if you choose to ignore these errors, turn validation off with --validate=false"
+)
+
+// Watchable describes a resource that can be watched for changes that occur on the server,
+// beginning after the provided resource version.
+type Watchable interface {
+	Watch(resourceVersion string) (watch.Interface, error)
+}
+
+// ResourceMapping allows an object to return the resource mapping associated with
+// the resource or resources it represents.
+type ResourceMapping interface {
+	ResourceMapping() *meta.RESTMapping
+}
+
+// Info contains temporary info to execute a REST call, or show the results
+// of an already completed REST call.
+type Info struct {
+	// Client will only be present if this builder was not local
+	Client RESTClient
+	// Mapping will only be present if this builder was not local
+	Mapping *meta.RESTMapping
+
+	// Namespace will be set if the object is namespaced and has a specified value.
+	Namespace string
+	Name      string
+
+	// Optional, Source is the filename or URL to template file (.json or .yaml),
+	// or stdin to use to handle the resource
+	Source string
+	// Optional, this is the most recent value returned by the server if available. It will
+	// typically be in unstructured or internal forms, depending on how the Builder was
+	// defined. If retrieved from the server, the Builder expects the mapping client to
+	// decide the final form. Use the AsVersioned, AsUnstructured, and AsInternal helpers
+	// to alter the object versions.
+	Object runtime.Object
+	// Optional, this is the most recent resource version the server knows about for
+	// this type of resource. It may not match the resource version of the object,
+	// but if set it should be equal to or newer than the resource version of the
+	// object (however the server defines resource version).
+	ResourceVersion string
+	// Optional, should this resource be exported, stripped of cluster-specific and instance specific fields
+	Export bool
+}
+
+// Visit implements Visitor
+func (i *Info) Visit(fn VisitorFunc) error {
+	return fn(i, nil)
+}
+
+// Get retrieves the object from the Namespace and Name fields
+func (i *Info) Get() (err error) {
+	obj, err := NewHelper(i.Client, i.Mapping).Get(i.Namespace, i.Name, i.Export)
+	if err != nil {
+		if errors.IsNotFound(err) && len(i.Namespace) > 0 && i.Namespace != metav1.NamespaceDefault && i.Namespace != metav1.NamespaceAll {
+			err2 := i.Client.Get().AbsPath("api", "v1", "namespaces", i.Namespace).Do(context.TODO()).Error()
+			if err2 != nil && errors.IsNotFound(err2) {
+				return err2
+			}
+		}
+		return err
+	}
+	i.Object = obj
+	i.ResourceVersion, _ = metadataAccessor.ResourceVersion(obj)
+	return nil
+}
+
+// Refresh updates the object with another object. If ignoreError is set
+// the Object will be updated even if name, namespace, or resourceVersion
+// attributes cannot be loaded from the object.
+func (i *Info) Refresh(obj runtime.Object, ignoreError bool) error {
+	name, err := metadataAccessor.Name(obj)
+	if err != nil {
+		if !ignoreError {
+			return err
+		}
+	} else {
+		i.Name = name
+	}
+	namespace, err := metadataAccessor.Namespace(obj)
+	if err != nil {
+		if !ignoreError {
+			return err
+		}
+	} else {
+		i.Namespace = namespace
+	}
+	version, err := metadataAccessor.ResourceVersion(obj)
+	if err != nil {
+		if !ignoreError {
+			return err
+		}
+	} else {
+		i.ResourceVersion = version
+	}
+	i.Object = obj
+	return nil
+}
+
+// ObjectName returns an approximate form of the resource's kind/name.
+func (i *Info) ObjectName() string {
+	if i.Mapping != nil {
+		return fmt.Sprintf("%s/%s", i.Mapping.Resource.Resource, i.Name)
+	}
+	gvk := i.Object.GetObjectKind().GroupVersionKind()
+	if len(gvk.Group) == 0 {
+		return fmt.Sprintf("%s/%s", strings.ToLower(gvk.Kind), i.Name)
+	}
+	return fmt.Sprintf("%s.%s/%s\n", strings.ToLower(gvk.Kind), gvk.Group, i.Name)
+}
+
+// String returns the general purpose string representation
+func (i *Info) String() string {
+	basicInfo := fmt.Sprintf("Name: %q, Namespace: %q", i.Name, i.Namespace)
+	if i.Mapping != nil {
+		mappingInfo := fmt.Sprintf("Resource: %q, GroupVersionKind: %q", i.Mapping.Resource.String(),
+			i.Mapping.GroupVersionKind.String())
+		return fmt.Sprint(mappingInfo, "\n", basicInfo)
+	}
+	return basicInfo
+}
+
+// Namespaced returns true if the object belongs to a namespace
+func (i *Info) Namespaced() bool {
+	if i.Mapping != nil {
+		// if we have RESTMapper info, use it
+		return i.Mapping.Scope.Name() == meta.RESTScopeNameNamespace
+	}
+	// otherwise, use the presence of a namespace in the info as an indicator
+	return len(i.Namespace) > 0
+}
+
+// Watch returns server changes to this object after it was retrieved.
+func (i *Info) Watch(resourceVersion string) (watch.Interface, error) {
+	return NewHelper(i.Client, i.Mapping).WatchSingle(i.Namespace, i.Name, resourceVersion)
+}
+
+// ResourceMapping returns the mapping for this resource and implements ResourceMapping
+func (i *Info) ResourceMapping() *meta.RESTMapping {
+	return i.Mapping
+}
+
+// VisitorList implements Visit for the sub visitors it contains. The first error
+// returned from a child Visitor will terminate iteration.
+type VisitorList []Visitor
+
+// Visit implements Visitor
+func (l VisitorList) Visit(fn VisitorFunc) error {
+	for i := range l {
+		if err := l[i].Visit(fn); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// EagerVisitorList implements Visit for the sub visitors it contains. All errors
+// will be captured and returned at the end of iteration.
+type EagerVisitorList []Visitor
+
+// Visit implements Visitor, and gathers errors that occur during processing until
+// all sub visitors have been visited.
+func (l EagerVisitorList) Visit(fn VisitorFunc) error {
+	errs := []error(nil)
+	for i := range l {
+		if err := l[i].Visit(func(info *Info, err error) error {
+			if err != nil {
+				errs = append(errs, err)
+				return nil
+			}
+			if err := fn(info, nil); err != nil {
+				errs = append(errs, err)
+			}
+			return nil
+		}); err != nil {
+			errs = append(errs, err)
+		}
+	}
+	return utilerrors.NewAggregate(errs)
+}
+
+func ValidateSchema(data []byte, schema ContentValidator) error {
+	if schema == nil {
+		return nil
+	}
+	if err := schema.ValidateBytes(data); err != nil {
+		return fmt.Errorf("error validating data: %v; %s", err, stopValidateMessage)
+	}
+	return nil
+}
+
+// URLVisitor downloads the contents of a URL, and if successful, returns
+// an info object representing the downloaded object.
+type URLVisitor struct {
+	URL *url.URL
+	*StreamVisitor
+	HttpAttemptCount int
+}
+
+func (v *URLVisitor) Visit(fn VisitorFunc) error {
+	body, err := readHttpWithRetries(httpgetImpl, time.Second, v.URL.String(), v.HttpAttemptCount)
+	if err != nil {
+		return err
+	}
+	defer body.Close()
+	v.StreamVisitor.Reader = body
+	return v.StreamVisitor.Visit(fn)
+}
+
+// readHttpWithRetries tries to http.Get the given URL up to attempts times before giving up, sleeping duration between attempts.
+func readHttpWithRetries(get httpget, duration time.Duration, u string, attempts int) (io.ReadCloser, error) {
+	var err error
+	var body io.ReadCloser
+	if attempts <= 0 {
+		return nil, fmt.Errorf("http attempts must be greater than 0, was %d", attempts)
+	}
+	for i := 0; i < attempts; i++ {
+		var statusCode int
+		var status string
+		if i > 0 {
+			time.Sleep(duration)
+		}
+
+		// Try to get the URL
+		statusCode, status, body, err = get(u)
+
+		// Retry Errors
+		if err != nil {
+			continue
+		}
+
+		// Error - Set the error condition from the StatusCode
+		if statusCode != http.StatusOK {
+			err = fmt.Errorf("unable to read URL %q, server reported %s, status code=%d", u, status, statusCode)
+		}
+
+		if statusCode >= 500 && statusCode < 600 {
+			// Retry 500's
+			continue
+		} else {
+			// Don't retry other StatusCodes
+			break
+		}
+	}
+	return body, err
+}
+
+// httpget defines a function to retrieve a URL and return the results. Exists for unit test stubbing.
+type httpget func(url string) (int, string, io.ReadCloser, error)
+
+// httpgetImpl implements a function to retrieve a URL and return the results.
+func httpgetImpl(url string) (int, string, io.ReadCloser, error) {
+	resp, err := http.Get(url)
+	if err != nil {
+		return 0, "", nil, err
+	}
+	return resp.StatusCode, resp.Status, resp.Body, nil
+}
+
+// DecoratedVisitor will invoke the decorators in order prior to invoking the visitor function
+// passed to Visit. An error will terminate the visit.
+type DecoratedVisitor struct {
+	visitor    Visitor
+	decorators []VisitorFunc
+}
+
+// NewDecoratedVisitor will create a visitor that invokes the provided visitor functions before
+// the user supplied visitor function is invoked, giving them the opportunity to mutate the Info
+// object or terminate early with an error.
+func NewDecoratedVisitor(v Visitor, fn ...VisitorFunc) Visitor {
+	if len(fn) == 0 {
+		return v
+	}
+	return DecoratedVisitor{v, fn}
+}
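+
+// Example (sketch; inner is an assumed Visitor): decorators run in order before
+// the function passed to Visit, so namespaces can be defaulted and objects
+// lazily fetched on every Info (SetNamespace and RetrieveLazy are defined later
+// in this file).
+//
+//	v := NewDecoratedVisitor(inner, SetNamespace("default"), RetrieveLazy)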
+
+// Visit implements Visitor
+func (v DecoratedVisitor) Visit(fn VisitorFunc) error {
+	return v.visitor.Visit(func(info *Info, err error) error {
+		if err != nil {
+			return err
+		}
+		for i := range v.decorators {
+			if err := v.decorators[i](info, nil); err != nil {
+				return err
+			}
+		}
+		return fn(info, nil)
+	})
+}
+
+// ContinueOnErrorVisitor visits each item and, if an error occurs on
+// any individual item, returns an aggregate error after all items
+// are visited.
+type ContinueOnErrorVisitor struct {
+	Visitor
+}
+
+// Visit returns nil if no error occurs during traversal, a regular
+// error if one occurs, or if multiple errors occur, an aggregate
+// error.  If the provided visitor fails on any individual item it
+// will not prevent the remaining items from being visited. An error
+// returned by the visitor directly may still result in some items
+// not being visited.
+func (v ContinueOnErrorVisitor) Visit(fn VisitorFunc) error {
+	errs := []error{}
+	err := v.Visitor.Visit(func(info *Info, err error) error {
+		if err != nil {
+			errs = append(errs, err)
+			return nil
+		}
+		if err := fn(info, nil); err != nil {
+			errs = append(errs, err)
+		}
+		return nil
+	})
+	if err != nil {
+		errs = append(errs, err)
+	}
+	if len(errs) == 1 {
+		return errs[0]
+	}
+	return utilerrors.NewAggregate(errs)
+}
+
+// FlattenListVisitor flattens any objects that meta.ExtractList recognizes as a list
+// - has an "Items" public field that is a slice of runtime.Objects or objects satisfying
+// that interface - into multiple Infos. Returns nil in the case of no errors.
+// When an error is hit on sub items (for instance, if a List contains an object that does
+// not have a registered client or resource), returns an aggregate error.
+type FlattenListVisitor struct {
+	visitor Visitor
+	typer   runtime.ObjectTyper
+	mapper  *mapper
+}
+
+// NewFlattenListVisitor creates a visitor that will expand list style runtime.Objects
+// into individual items and then visit them individually.
+func NewFlattenListVisitor(v Visitor, typer runtime.ObjectTyper, mapper *mapper) Visitor {
+	return FlattenListVisitor{v, typer, mapper}
+}
+
+func (v FlattenListVisitor) Visit(fn VisitorFunc) error {
+	return v.visitor.Visit(func(info *Info, err error) error {
+		if err != nil {
+			return err
+		}
+		if info.Object == nil {
+			return fn(info, nil)
+		}
+		if !meta.IsListType(info.Object) {
+			return fn(info, nil)
+		}
+
+		items := []runtime.Object{}
+		itemsToProcess := []runtime.Object{info.Object}
+
+		for i := 0; i < len(itemsToProcess); i++ {
+			currObj := itemsToProcess[i]
+			if !meta.IsListType(currObj) {
+				items = append(items, currObj)
+				continue
+			}
+
+			currItems, err := meta.ExtractList(currObj)
+			if err != nil {
+				return err
+			}
+			if errs := runtime.DecodeList(currItems, v.mapper.decoder); len(errs) > 0 {
+				return utilerrors.NewAggregate(errs)
+			}
+			itemsToProcess = append(itemsToProcess, currItems...)
+		}
+
+		// If we have a GroupVersionKind on the list, prioritize that when asking for info on the objects contained in the list
+		var preferredGVKs []schema.GroupVersionKind
+		if info.Mapping != nil && !info.Mapping.GroupVersionKind.Empty() {
+			preferredGVKs = append(preferredGVKs, info.Mapping.GroupVersionKind)
+		}
+		errs := []error{}
+		for i := range items {
+			item, err := v.mapper.infoForObject(items[i], v.typer, preferredGVKs)
+			if err != nil {
+				errs = append(errs, err)
+				continue
+			}
+			if len(info.ResourceVersion) != 0 {
+				item.ResourceVersion = info.ResourceVersion
+			}
+			if err := fn(item, nil); err != nil {
+				errs = append(errs, err)
+			}
+		}
+		return utilerrors.NewAggregate(errs)
+
+	})
+}
+
+func ignoreFile(path string, extensions []string) bool {
+	if len(extensions) == 0 {
+		return false
+	}
+	ext := filepath.Ext(path)
+	for _, s := range extensions {
+		if s == ext {
+			return false
+		}
+	}
+	return true
+}
+
+// FileVisitorForSTDIN returns a special FileVisitor just for STDIN
+func FileVisitorForSTDIN(mapper *mapper, schema ContentValidator) Visitor {
+	return &FileVisitor{
+		Path:          constSTDINstr,
+		StreamVisitor: NewStreamVisitor(nil, mapper, constSTDINstr, schema),
+	}
+}
+
+// ExpandPathsToFileVisitors will return a slice of FileVisitors that will handle files from the provided path.
+// After FileVisitors open the files, they will pass an io.Reader to a StreamVisitor to do the reading (stdin
+// is also taken care of). The paths argument also accepts a single file, in which case a single visitor is returned.
+func ExpandPathsToFileVisitors(mapper *mapper, paths string, recursive bool, extensions []string, schema ContentValidator) ([]Visitor, error) {
+	var visitors []Visitor
+	err := filepath.Walk(paths, func(path string, fi os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		if fi.IsDir() {
+			if path != paths && !recursive {
+				return filepath.SkipDir
+			}
+			return nil
+		}
+		// Don't check extension if the filepath was passed explicitly
+		if path != paths && ignoreFile(path, extensions) {
+			return nil
+		}
+
+		visitor := &FileVisitor{
+			Path:          path,
+			StreamVisitor: NewStreamVisitor(nil, mapper, path, schema),
+		}
+
+		visitors = append(visitors, visitor)
+		return nil
+	})
+
+	if err != nil {
+		return nil, err
+	}
+	return visitors, nil
+}
+
+// FileVisitor wraps a StreamVisitor to handle opening and closing files
+type FileVisitor struct {
+	Path string
+	*StreamVisitor
+}
+
+// Visit in a FileVisitor takes care of opening and closing the file before delegating to the StreamVisitor
+func (v *FileVisitor) Visit(fn VisitorFunc) error {
+	var f *os.File
+	if v.Path == constSTDINstr {
+		f = os.Stdin
+	} else {
+		var err error
+		f, err = os.Open(v.Path)
+		if err != nil {
+			return err
+		}
+		defer f.Close()
+	}
+
+	// TODO: Consider adding a flag to force to UTF16, apparently some
+	// Windows tools don't write the BOM
+	utf16bom := unicode.BOMOverride(unicode.UTF8.NewDecoder())
+	v.StreamVisitor.Reader = transform.NewReader(f, utf16bom)
+
+	return v.StreamVisitor.Visit(fn)
+}
+
+// KustomizeVisitor is a wrapper around a StreamVisitor that handles kustomization directories
+type KustomizeVisitor struct {
+	Path string
+	*StreamVisitor
+}
+
+// Visit in a KustomizeVisitor runs the kustomize build and saves the output in the StreamVisitor
+func (v *KustomizeVisitor) Visit(fn VisitorFunc) error {
+	fSys := fs.MakeRealFS()
+	var out bytes.Buffer
+	err := kustomize.RunKustomizeBuild(&out, fSys, v.Path)
+	if err != nil {
+		return err
+	}
+	v.StreamVisitor.Reader = bytes.NewReader(out.Bytes())
+	return v.StreamVisitor.Visit(fn)
+}
+
+// StreamVisitor reads objects from an io.Reader and walks them. A stream visitor can only be
+// visited once.
+// TODO: depends on objects being in JSON format before being passed to decode - need to implement
+// a stream decoder method on runtime.Codec to properly handle this.
+type StreamVisitor struct {
+	io.Reader
+	*mapper
+
+	Source string
+	Schema ContentValidator
+}
+
+// NewStreamVisitor is a helper function that is useful when we want to change the fields of the struct but keep call sites the same.
+func NewStreamVisitor(r io.Reader, mapper *mapper, source string, schema ContentValidator) *StreamVisitor {
+	return &StreamVisitor{
+		Reader: r,
+		mapper: mapper,
+		Source: source,
+		Schema: schema,
+	}
+}
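+
+// Example (sketch; m is an assumed *mapper): a StreamVisitor decodes every
+// document in a multi-document YAML or JSON stream and yields one Info per
+// object.
+//
+//	r := strings.NewReader("apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: a\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: b\n")
+//	sv := NewStreamVisitor(r, m, "inline", nil)
+//	err := sv.Visit(func(info *Info, err error) error { return err })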
+
+// Visit implements Visitor over a stream. StreamVisitor is able to distinguish multiple resources in one stream.
+func (v *StreamVisitor) Visit(fn VisitorFunc) error {
+	d := yaml.NewYAMLOrJSONDecoder(v.Reader, 4096)
+	for {
+		ext := runtime.RawExtension{}
+		if err := d.Decode(&ext); err != nil {
+			if err == io.EOF {
+				return nil
+			}
+			return fmt.Errorf("error parsing %s: %v", v.Source, err)
+		}
+		// TODO: This needs to be able to handle object in other encodings and schemas.
+		ext.Raw = bytes.TrimSpace(ext.Raw)
+		if len(ext.Raw) == 0 || bytes.Equal(ext.Raw, []byte("null")) {
+			continue
+		}
+		if err := ValidateSchema(ext.Raw, v.Schema); err != nil {
+			return fmt.Errorf("error validating %q: %v", v.Source, err)
+		}
+		info, err := v.infoForData(ext.Raw, v.Source)
+		if err != nil {
+			if fnErr := fn(info, err); fnErr != nil {
+				return fnErr
+			}
+			continue
+		}
+		if err := fn(info, nil); err != nil {
+			return err
+		}
+	}
+}
+
+func UpdateObjectNamespace(info *Info, err error) error {
+	if err != nil {
+		return err
+	}
+	if info.Object != nil {
+		return metadataAccessor.SetNamespace(info.Object, info.Namespace)
+	}
+	return nil
+}
+
+// FilterNamespace omits the namespace if the object is not namespace scoped
+func FilterNamespace(info *Info, err error) error {
+	if err != nil {
+		return err
+	}
+	if !info.Namespaced() {
+		info.Namespace = ""
+		UpdateObjectNamespace(info, nil)
+	}
+	return nil
+}
+
+// SetNamespace ensures that every Info object visited will have a namespace
+// set. If info.Object is set, it will be mutated as well.
+func SetNamespace(namespace string) VisitorFunc {
+	return func(info *Info, err error) error {
+		if err != nil {
+			return err
+		}
+		if !info.Namespaced() {
+			return nil
+		}
+		if len(info.Namespace) == 0 {
+			info.Namespace = namespace
+			UpdateObjectNamespace(info, nil)
+		}
+		return nil
+	}
+}
+
+// RequireNamespace will either set a namespace if none is provided on the
+// Info object, or if the namespace is set and does not match the provided
+// value, returns an error. This is intended to guard against administrators
+// accidentally operating on resources outside their namespace.
+func RequireNamespace(namespace string) VisitorFunc {
+	return func(info *Info, err error) error {
+		if err != nil {
+			return err
+		}
+		if !info.Namespaced() {
+			return nil
+		}
+		if len(info.Namespace) == 0 {
+			info.Namespace = namespace
+			UpdateObjectNamespace(info, nil)
+			return nil
+		}
+		if info.Namespace != namespace {
+			return fmt.Errorf("the namespace from the provided object %q does not match the namespace %q. You must pass '--namespace=%s' to perform this operation.", info.Namespace, namespace, info.Namespace)
+		}
+		return nil
+	}
+}
+
+// RetrieveLatest updates the Object on each Info by invoking a standard client
+// Get.
+func RetrieveLatest(info *Info, err error) error {
+	if err != nil {
+		return err
+	}
+	if meta.IsListType(info.Object) {
+		return fmt.Errorf("watch is only supported on individual resources and resource collections, but a list of resources is found")
+	}
+	if len(info.Name) == 0 {
+		return nil
+	}
+	if info.Namespaced() && len(info.Namespace) == 0 {
+		return fmt.Errorf("no namespace set on resource %s %q", info.Mapping.Resource, info.Name)
+	}
+	return info.Get()
+}
+
+// RetrieveLazy updates the object if it has not been loaded yet.
+func RetrieveLazy(info *Info, err error) error {
+	if err != nil {
+		return err
+	}
+	if info.Object == nil {
+		return info.Get()
+	}
+	return nil
+}
+
+// CreateAndRefresh creates an object from input info and refreshes info with that object
+func CreateAndRefresh(info *Info) error {
+	obj, err := NewHelper(info.Client, info.Mapping).Create(info.Namespace, true, info.Object)
+	if err != nil {
+		return err
+	}
+	info.Refresh(obj, true)
+	return nil
+}
+
+type FilterFunc func(info *Info, err error) (bool, error)
+
+type FilteredVisitor struct {
+	visitor Visitor
+	filters []FilterFunc
+}
+
+func NewFilteredVisitor(v Visitor, fn ...FilterFunc) Visitor {
+	if len(fn) == 0 {
+		return v
+	}
+	return FilteredVisitor{v, fn}
+}
+
+func (v FilteredVisitor) Visit(fn VisitorFunc) error {
+	return v.visitor.Visit(func(info *Info, err error) error {
+		if err != nil {
+			return err
+		}
+		for _, filter := range v.filters {
+			ok, err := filter(info, nil)
+			if err != nil {
+				return err
+			}
+			if !ok {
+				return nil
+			}
+		}
+		return fn(info, nil)
+	})
+}
+
+func FilterByLabelSelector(s labels.Selector) FilterFunc {
+	return func(info *Info, err error) (bool, error) {
+		if err != nil {
+			return false, err
+		}
+		a, err := meta.Accessor(info.Object)
+		if err != nil {
+			return false, err
+		}
+		if !s.Matches(labels.Set(a.GetLabels())) {
+			return false, nil
+		}
+		return true, nil
+	}
+}
+
+type InfoListVisitor []*Info
+
+func (infos InfoListVisitor) Visit(fn VisitorFunc) error {
+	var err error
+	for _, i := range infos {
+		err = fn(i, err)
+	}
+	return err
+}
diff --git a/vendor/k8s.io/client-go/discovery/cached/disk/cached_discovery.go b/vendor/k8s.io/client-go/discovery/cached/disk/cached_discovery.go
new file mode 100644
index 00000000..fd8b61d1
--- /dev/null
+++ b/vendor/k8s.io/client-go/discovery/cached/disk/cached_discovery.go
@@ -0,0 +1,300 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package disk
+
+import (
+	"errors"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"path/filepath"
+	"sync"
+	"time"
+
+	openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2"
+	"k8s.io/klog"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/version"
+	"k8s.io/client-go/discovery"
+	"k8s.io/client-go/kubernetes/scheme"
+	restclient "k8s.io/client-go/rest"
+)
+
+// CachedDiscoveryClient implements the functions that discover server-supported API groups,
+// versions and resources.
+type CachedDiscoveryClient struct {
+	delegate discovery.DiscoveryInterface
+
+	// cacheDirectory is the directory where discovery docs are held.  It must be unique per host:port combination to work well.
+	cacheDirectory string
+
+	// ttl is how long the cache should be considered valid
+	ttl time.Duration
+
+	// mutex protects the variables below
+	mutex sync.Mutex
+
+	// ourFiles are all filenames of cache files created by this process
+	ourFiles map[string]struct{}
+	// invalidated is true if all cache files should be ignored that are not ours (e.g. after Invalidate() was called)
+	invalidated bool
+	// fresh is true if all used cache files were ours
+	fresh bool
+}
+
+var _ discovery.CachedDiscoveryInterface = &CachedDiscoveryClient{}
+
+// ServerResourcesForGroupVersion returns the supported resources for a group and version.
+func (d *CachedDiscoveryClient) ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) {
+	filename := filepath.Join(d.cacheDirectory, groupVersion, "serverresources.json")
+	cachedBytes, err := d.getCachedFile(filename)
+	// don't fail on errors, we either don't have a file or won't be able to run the cached check. Either way we can fallback.
+	if err == nil {
+		cachedResources := &metav1.APIResourceList{}
+		if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), cachedBytes, cachedResources); err == nil {
+			klog.V(10).Infof("returning cached discovery info from %v", filename)
+			return cachedResources, nil
+		}
+	}
+
+	liveResources, err := d.delegate.ServerResourcesForGroupVersion(groupVersion)
+	if err != nil {
+		klog.V(3).Infof("skipped caching discovery info due to %v", err)
+		return liveResources, err
+	}
+	if liveResources == nil || len(liveResources.APIResources) == 0 {
+		klog.V(3).Infof("skipped caching discovery info, no resources found")
+		return liveResources, err
+	}
+
+	if err := d.writeCachedFile(filename, liveResources); err != nil {
+		klog.V(1).Infof("failed to write cache to %v due to %v", filename, err)
+	}
+
+	return liveResources, nil
+}
+
+// ServerResources returns the supported resources for all groups and versions.
+// Deprecated: use ServerGroupsAndResources instead.
+func (d *CachedDiscoveryClient) ServerResources() ([]*metav1.APIResourceList, error) {
+	_, rs, err := discovery.ServerGroupsAndResources(d)
+	return rs, err
+}
+
+// ServerGroupsAndResources returns the supported groups and resources for all groups and versions.
+func (d *CachedDiscoveryClient) ServerGroupsAndResources() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) {
+	return discovery.ServerGroupsAndResources(d)
+}
+
+// ServerGroups returns the supported groups, with information like supported versions and the
+// preferred version.
+func (d *CachedDiscoveryClient) ServerGroups() (*metav1.APIGroupList, error) {
+	filename := filepath.Join(d.cacheDirectory, "servergroups.json")
+	cachedBytes, err := d.getCachedFile(filename)
+	// don't fail on errors, we either don't have a file or won't be able to run the cached check. Either way we can fallback.
+	if err == nil {
+		cachedGroups := &metav1.APIGroupList{}
+		if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), cachedBytes, cachedGroups); err == nil {
+			klog.V(10).Infof("returning cached discovery info from %v", filename)
+			return cachedGroups, nil
+		}
+	}
+
+	liveGroups, err := d.delegate.ServerGroups()
+	if err != nil {
+		klog.V(3).Infof("skipped caching discovery info due to %v", err)
+		return liveGroups, err
+	}
+	if liveGroups == nil || len(liveGroups.Groups) == 0 {
+		klog.V(3).Infof("skipped caching discovery info, no groups found")
+		return liveGroups, err
+	}
+
+	if err := d.writeCachedFile(filename, liveGroups); err != nil {
+		klog.V(1).Infof("failed to write cache to %v due to %v", filename, err)
+	}
+
+	return liveGroups, nil
+}
+
+func (d *CachedDiscoveryClient) getCachedFile(filename string) ([]byte, error) {
+	// after invalidation ignore cache files not created by this process
+	d.mutex.Lock()
+	_, ourFile := d.ourFiles[filename]
+	if d.invalidated && !ourFile {
+		d.mutex.Unlock()
+		return nil, errors.New("cache invalidated")
+	}
+	d.mutex.Unlock()
+
+	file, err := os.Open(filename)
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+
+	fileInfo, err := file.Stat()
+	if err != nil {
+		return nil, err
+	}
+
+	if time.Now().After(fileInfo.ModTime().Add(d.ttl)) {
+		return nil, errors.New("cache expired")
+	}
+
+	// the cache is present and it's valid. Try to read and use it.
+	cachedBytes, err := ioutil.ReadAll(file)
+	if err != nil {
+		return nil, err
+	}
+
+	d.mutex.Lock()
+	defer d.mutex.Unlock()
+	d.fresh = d.fresh && ourFile
+
+	return cachedBytes, nil
+}
+
+func (d *CachedDiscoveryClient) writeCachedFile(filename string, obj runtime.Object) error {
+	if err := os.MkdirAll(filepath.Dir(filename), 0750); err != nil {
+		return err
+	}
+
+	bytes, err := runtime.Encode(scheme.Codecs.LegacyCodec(), obj)
+	if err != nil {
+		return err
+	}
+
+	f, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename)+".")
+	if err != nil {
+		return err
+	}
+	defer os.Remove(f.Name())
+	_, err = f.Write(bytes)
+	if err != nil {
+		return err
+	}
+
+	err = os.Chmod(f.Name(), 0660)
+	if err != nil {
+		return err
+	}
+
+	name := f.Name()
+	err = f.Close()
+	if err != nil {
+		return err
+	}
+
+	// atomic rename
+	d.mutex.Lock()
+	defer d.mutex.Unlock()
+	err = os.Rename(name, filename)
+	if err == nil {
+		d.ourFiles[filename] = struct{}{}
+	}
+	return err
+}
+
+// RESTClient returns a RESTClient that is used to communicate with API server
+// by this client implementation.
+func (d *CachedDiscoveryClient) RESTClient() restclient.Interface {
+	return d.delegate.RESTClient()
+}
+
+// ServerPreferredResources returns the supported resources with the version preferred by the
+// server.
+func (d *CachedDiscoveryClient) ServerPreferredResources() ([]*metav1.APIResourceList, error) {
+	return discovery.ServerPreferredResources(d)
+}
+
+// ServerPreferredNamespacedResources returns the supported namespaced resources with the
+// version preferred by the server.
+func (d *CachedDiscoveryClient) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) {
+	return discovery.ServerPreferredNamespacedResources(d)
+}
+
+// ServerVersion retrieves and parses the server's version (git version).
+func (d *CachedDiscoveryClient) ServerVersion() (*version.Info, error) {
+	return d.delegate.ServerVersion()
+}
+
+// OpenAPISchema retrieves and parses the swagger API schema the server supports.
+func (d *CachedDiscoveryClient) OpenAPISchema() (*openapi_v2.Document, error) {
+	return d.delegate.OpenAPISchema()
+}
+
+// Fresh is supposed to tell the caller whether or not to retry if the cache
+// fails to find something (false = retry, true = no need to retry).
+func (d *CachedDiscoveryClient) Fresh() bool {
+	d.mutex.Lock()
+	defer d.mutex.Unlock()
+
+	return d.fresh
+}
+
+// Invalidate enforces that no cached data older than the current time is used in the future.
+func (d *CachedDiscoveryClient) Invalidate() {
+	d.mutex.Lock()
+	defer d.mutex.Unlock()
+
+	d.ourFiles = map[string]struct{}{}
+	d.fresh = true
+	d.invalidated = true
+}
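As a usage note for the Fresh/Invalidate pair above: callers are expected to retry a failed lookup once after invalidating a non-fresh cache. The sketch below is illustrative only (the helper name and the choice of ServerGroups as the retried call are assumptions, not part of this patch); it targets the exported discovery.CachedDiscoveryInterface, which combines the discovery interface with Fresh and Invalidate.

package example // hypothetical caller, not part of this patch

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/discovery"
)

// getGroupsWithRetry asks for the server groups; if the lookup fails while the
// cache is not fresh, it invalidates the cache and retries exactly once.
func getGroupsWithRetry(d discovery.CachedDiscoveryInterface) (*metav1.APIGroupList, error) {
	groups, err := d.ServerGroups()
	if err == nil || d.Fresh() {
		return groups, err
	}
	d.Invalidate()
	return d.ServerGroups()
}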
+
+// NewCachedDiscoveryClientForConfig creates a new DiscoveryClient for the given config, and wraps
+// the created client in a CachedDiscoveryClient. The provided configuration is updated with a
+// custom transport that understands cache responses.
+// We receive two distinct cache directories for now, in order to preserve old behavior
+// which makes use of the --cache-dir flag value for storing cache data from the CacheRoundTripper,
+// and makes use of the hardcoded destination (~/.kube/cache/discovery/...) for storing
+// CachedDiscoveryClient cache data. If httpCacheDir is empty, the restconfig's transport will not
+// be updated with a roundtripper that understands cache responses.
+// If discoveryCacheDir is empty, cached server resource data will be looked up in the current directory.
+// TODO(juanvallejo): the value of "--cache-dir" should be honored. Consolidate discoveryCacheDir with httpCacheDir
+// so that server resources and http-cache data are stored in the same location, provided via config flags.
+func NewCachedDiscoveryClientForConfig(config *restclient.Config, discoveryCacheDir, httpCacheDir string, ttl time.Duration) (*CachedDiscoveryClient, error) {
+	if len(httpCacheDir) > 0 {
+		// update the given restconfig with a custom roundtripper that
+		// understands how to handle cache responses.
+		config = restclient.CopyConfig(config)
+		config.Wrap(func(rt http.RoundTripper) http.RoundTripper {
+			return newCacheRoundTripper(httpCacheDir, rt)
+		})
+	}
+
+	discoveryClient, err := discovery.NewDiscoveryClientForConfig(config)
+	if err != nil {
+		return nil, err
+	}
+
+	return newCachedDiscoveryClient(discoveryClient, discoveryCacheDir, ttl), nil
+}
+
+// newCachedDiscoveryClient creates a new CachedDiscoveryClient. cacheDirectory is the directory where discovery docs are held. It must be unique per host:port combination to work well.
+func newCachedDiscoveryClient(delegate discovery.DiscoveryInterface, cacheDirectory string, ttl time.Duration) *CachedDiscoveryClient {
+	return &CachedDiscoveryClient{
+		delegate:       delegate,
+		cacheDirectory: cacheDirectory,
+		ttl:            ttl,
+		ourFiles:       map[string]struct{}{},
+		fresh:          true,
+	}
+}
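For reference, a minimal way to wire the constructor above from a kubeconfig might look like the following; the cache paths and the 10-minute TTL are example values, not defaults taken from this patch.

package example // illustrative construction of a disk-backed cached discovery client

import (
	"path/filepath"
	"time"

	"k8s.io/client-go/discovery/cached/disk"
	"k8s.io/client-go/tools/clientcmd"
)

// newCachedDiscovery builds a CachedDiscoveryClient from a kubeconfig path.
// httpCacheDir feeds the ETag-aware round tripper and discoveryCacheDir holds
// the serialized group/resource documents, reused for up to the given TTL.
func newCachedDiscovery(kubeconfig, cacheRoot string) (*disk.CachedDiscoveryClient, error) {
	cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		return nil, err
	}
	httpCacheDir := filepath.Join(cacheRoot, "http")
	discoveryCacheDir := filepath.Join(cacheRoot, "discovery")
	return disk.NewCachedDiscoveryClientForConfig(cfg, discoveryCacheDir, httpCacheDir, 10*time.Minute)
}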
diff --git a/vendor/k8s.io/client-go/discovery/cached/disk/round_tripper.go b/vendor/k8s.io/client-go/discovery/cached/disk/round_tripper.go
new file mode 100644
index 00000000..1dfb8297
--- /dev/null
+++ b/vendor/k8s.io/client-go/discovery/cached/disk/round_tripper.go
@@ -0,0 +1,65 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package disk
+
+import (
+	"net/http"
+	"os"
+	"path/filepath"
+
+	"github.com/gregjones/httpcache"
+	"github.com/gregjones/httpcache/diskcache"
+	"github.com/peterbourgon/diskv"
+	"k8s.io/klog"
+)
+
+type cacheRoundTripper struct {
+	rt *httpcache.Transport
+}
+
+// newCacheRoundTripper creates a roundtripper that reads the ETag on
+// response headers and sends the If-None-Match header on subsequent
+// corresponding requests.
+func newCacheRoundTripper(cacheDir string, rt http.RoundTripper) http.RoundTripper {
+	d := diskv.New(diskv.Options{
+		PathPerm: os.FileMode(0750),
+		FilePerm: os.FileMode(0660),
+		BasePath: cacheDir,
+		TempDir:  filepath.Join(cacheDir, ".diskv-temp"),
+	})
+	t := httpcache.NewTransport(diskcache.NewWithDiskv(d))
+	t.Transport = rt
+
+	return &cacheRoundTripper{rt: t}
+}
+
+func (rt *cacheRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+	return rt.rt.RoundTrip(req)
+}
+
+func (rt *cacheRoundTripper) CancelRequest(req *http.Request) {
+	type canceler interface {
+		CancelRequest(*http.Request)
+	}
+	if cr, ok := rt.rt.Transport.(canceler); ok {
+		cr.CancelRequest(req)
+	} else {
+		klog.Errorf("CancelRequest not implemented by %T", rt.rt.Transport)
+	}
+}
+
+func (rt *cacheRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt.Transport }
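newCacheRoundTripper is unexported, so callers only reach it through NewCachedDiscoveryClientForConfig. As a sketch of the same stack assembled directly from the libraries it relies on (gregjones/httpcache, its diskcache backend, and peterbourgon/diskv), assuming an ordinary *http.Client is being wrapped:

package example // same caching stack, built by hand for illustration

import (
	"net/http"
	"os"
	"path/filepath"

	"github.com/gregjones/httpcache"
	"github.com/gregjones/httpcache/diskcache"
	"github.com/peterbourgon/diskv"
)

// newETagCachingClient returns an *http.Client whose transport stores responses
// on disk and revalidates them with If-None-Match on subsequent requests.
func newETagCachingClient(cacheDir string) *http.Client {
	store := diskv.New(diskv.Options{
		PathPerm: os.FileMode(0750),
		FilePerm: os.FileMode(0660),
		BasePath: cacheDir,
		TempDir:  filepath.Join(cacheDir, ".diskv-temp"),
	})
	t := httpcache.NewTransport(diskcache.NewWithDiskv(store))
	t.Transport = http.DefaultTransport
	return &http.Client{Transport: t}
}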
diff --git a/vendor/k8s.io/client-go/discovery/discovery_client.go b/vendor/k8s.io/client-go/discovery/discovery_client.go
index 5d89457c..dc12f9a2 100644
--- a/vendor/k8s.io/client-go/discovery/discovery_client.go
+++ b/vendor/k8s.io/client-go/discovery/discovery_client.go
@@ -17,6 +17,7 @@ limitations under the License.
 package discovery
 
 import (
+	"context"
 	"encoding/json"
 	"fmt"
 	"net/url"
@@ -155,7 +156,7 @@ func apiVersionsToAPIGroup(apiVersions *metav1.APIVersions) (apiGroup metav1.API
 func (d *DiscoveryClient) ServerGroups() (apiGroupList *metav1.APIGroupList, err error) {
 	// Get the groupVersions exposed at /api
 	v := &metav1.APIVersions{}
-	err = d.restClient.Get().AbsPath(d.LegacyPrefix).Do().Into(v)
+	err = d.restClient.Get().AbsPath(d.LegacyPrefix).Do(context.TODO()).Into(v)
 	apiGroup := metav1.APIGroup{}
 	if err == nil && len(v.Versions) != 0 {
 		apiGroup = apiVersionsToAPIGroup(v)
@@ -166,7 +167,7 @@ func (d *DiscoveryClient) ServerGroups() (apiGroupList *metav1.APIGroupList, err
 
 	// Get the groupVersions exposed at /apis
 	apiGroupList = &metav1.APIGroupList{}
-	err = d.restClient.Get().AbsPath("/apis").Do().Into(apiGroupList)
+	err = d.restClient.Get().AbsPath("/apis").Do(context.TODO()).Into(apiGroupList)
 	if err != nil && !errors.IsNotFound(err) && !errors.IsForbidden(err) {
 		return nil, err
 	}
@@ -196,7 +197,7 @@ func (d *DiscoveryClient) ServerResourcesForGroupVersion(groupVersion string) (r
 	resources = &metav1.APIResourceList{
 		GroupVersion: groupVersion,
 	}
-	err = d.restClient.Get().AbsPath(url.String()).Do().Into(resources)
+	err = d.restClient.Get().AbsPath(url.String()).Do(context.TODO()).Into(resources)
 	if err != nil {
 		// ignore 403 or 404 error to be compatible with an v1.0 server.
 		if groupVersion == "v1" && (errors.IsNotFound(err) || errors.IsForbidden(err)) {
@@ -405,7 +406,7 @@ func ServerPreferredNamespacedResources(d DiscoveryInterface) ([]*metav1.APIReso
 
 // ServerVersion retrieves and parses the server's version (git version).
 func (d *DiscoveryClient) ServerVersion() (*version.Info, error) {
-	body, err := d.restClient.Get().AbsPath("/version").Do().Raw()
+	body, err := d.restClient.Get().AbsPath("/version").Do(context.TODO()).Raw()
 	if err != nil {
 		return nil, err
 	}
@@ -419,12 +420,12 @@ func (d *DiscoveryClient) ServerVersion() (*version.Info, error) {
 
 // OpenAPISchema fetches the open api schema using a rest client and parses the proto.
 func (d *DiscoveryClient) OpenAPISchema() (*openapi_v2.Document, error) {
-	data, err := d.restClient.Get().AbsPath("/openapi/v2").SetHeader("Accept", mimePb).Do().Raw()
+	data, err := d.restClient.Get().AbsPath("/openapi/v2").SetHeader("Accept", mimePb).Do(context.TODO()).Raw()
 	if err != nil {
 		if errors.IsForbidden(err) || errors.IsNotFound(err) || errors.IsNotAcceptable(err) {
 			// single endpoint not found/registered in old server, try to fetch old endpoint
 			// TODO: remove this when kubectl/client-go don't work with 1.9 server
-			data, err = d.restClient.Get().AbsPath("/swagger-2.0.0.pb-v1").Do().Raw()
+			data, err = d.restClient.Get().AbsPath("/swagger-2.0.0.pb-v1").Do(context.TODO()).Raw()
 			if err != nil {
 				return nil, err
 			}
diff --git a/vendor/k8s.io/client-go/dynamic/interface.go b/vendor/k8s.io/client-go/dynamic/interface.go
new file mode 100644
index 00000000..b08067c3
--- /dev/null
+++ b/vendor/k8s.io/client-go/dynamic/interface.go
@@ -0,0 +1,61 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package dynamic
+
+import (
+	"context"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/watch"
+)
+
+type Interface interface {
+	Resource(resource schema.GroupVersionResource) NamespaceableResourceInterface
+}
+
+type ResourceInterface interface {
+	Create(ctx context.Context, obj *unstructured.Unstructured, options metav1.CreateOptions, subresources ...string) (*unstructured.Unstructured, error)
+	Update(ctx context.Context, obj *unstructured.Unstructured, options metav1.UpdateOptions, subresources ...string) (*unstructured.Unstructured, error)
+	UpdateStatus(ctx context.Context, obj *unstructured.Unstructured, options metav1.UpdateOptions) (*unstructured.Unstructured, error)
+	Delete(ctx context.Context, name string, options metav1.DeleteOptions, subresources ...string) error
+	DeleteCollection(ctx context.Context, options metav1.DeleteOptions, listOptions metav1.ListOptions) error
+	Get(ctx context.Context, name string, options metav1.GetOptions, subresources ...string) (*unstructured.Unstructured, error)
+	List(ctx context.Context, opts metav1.ListOptions) (*unstructured.UnstructuredList, error)
+	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, options metav1.PatchOptions, subresources ...string) (*unstructured.Unstructured, error)
+}
+
+type NamespaceableResourceInterface interface {
+	Namespace(string) ResourceInterface
+	ResourceInterface
+}
+
+// APIPathResolverFunc knows how to convert a groupVersion to its API path. The Kind field is optional.
+// TODO find a better place to move this for existing callers
+type APIPathResolverFunc func(kind schema.GroupVersionKind) string
+
+// LegacyAPIPathResolverFunc can resolve paths properly with the legacy API.
+// TODO find a better place to move this for existing callers
+func LegacyAPIPathResolverFunc(kind schema.GroupVersionKind) string {
+	if len(kind.Group) == 0 {
+		return "/api"
+	}
+	return "/apis"
+}
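Every method on ResourceInterface now takes a context as its first argument. A small, hypothetical caller of this interface could look like the following (the GroupVersionResource and namespace are placeholders, not values used by this patch):

package example // illustrative use of the context-aware dynamic interface

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
)

// listConfigMapNames lists ConfigMaps in one namespace through the dynamic client,
// passing the caller's context down to the underlying request.
func listConfigMapNames(ctx context.Context, client dynamic.Interface, namespace string) ([]string, error) {
	gvr := schema.GroupVersionResource{Version: "v1", Resource: "configmaps"}
	list, err := client.Resource(gvr).Namespace(namespace).List(ctx, metav1.ListOptions{})
	if err != nil {
		return nil, err
	}
	names := make([]string, 0, len(list.Items))
	for _, item := range list.Items {
		names = append(names, item.GetName())
	}
	return names, nil
}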
diff --git a/vendor/k8s.io/client-go/dynamic/scheme.go b/vendor/k8s.io/client-go/dynamic/scheme.go
new file mode 100644
index 00000000..3168c872
--- /dev/null
+++ b/vendor/k8s.io/client-go/dynamic/scheme.go
@@ -0,0 +1,108 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package dynamic
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/apimachinery/pkg/runtime/serializer/json"
+)
+
+var watchScheme = runtime.NewScheme()
+var basicScheme = runtime.NewScheme()
+var deleteScheme = runtime.NewScheme()
+var parameterScheme = runtime.NewScheme()
+var deleteOptionsCodec = serializer.NewCodecFactory(deleteScheme)
+var dynamicParameterCodec = runtime.NewParameterCodec(parameterScheme)
+
+var versionV1 = schema.GroupVersion{Version: "v1"}
+
+func init() {
+	metav1.AddToGroupVersion(watchScheme, versionV1)
+	metav1.AddToGroupVersion(basicScheme, versionV1)
+	metav1.AddToGroupVersion(parameterScheme, versionV1)
+	metav1.AddToGroupVersion(deleteScheme, versionV1)
+}
+
+// basicNegotiatedSerializer is used to handle discovery and error handling serialization
+type basicNegotiatedSerializer struct{}
+
+func (s basicNegotiatedSerializer) SupportedMediaTypes() []runtime.SerializerInfo {
+	return []runtime.SerializerInfo{
+		{
+			MediaType:        "application/json",
+			MediaTypeType:    "application",
+			MediaTypeSubType: "json",
+			EncodesAsText:    true,
+			Serializer:       json.NewSerializer(json.DefaultMetaFactory, unstructuredCreater{basicScheme}, unstructuredTyper{basicScheme}, false),
+			PrettySerializer: json.NewSerializer(json.DefaultMetaFactory, unstructuredCreater{basicScheme}, unstructuredTyper{basicScheme}, true),
+			StreamSerializer: &runtime.StreamSerializerInfo{
+				EncodesAsText: true,
+				Serializer:    json.NewSerializer(json.DefaultMetaFactory, basicScheme, basicScheme, false),
+				Framer:        json.Framer,
+			},
+		},
+	}
+}
+
+func (s basicNegotiatedSerializer) EncoderForVersion(encoder runtime.Encoder, gv runtime.GroupVersioner) runtime.Encoder {
+	return runtime.WithVersionEncoder{
+		Version:     gv,
+		Encoder:     encoder,
+		ObjectTyper: unstructuredTyper{basicScheme},
+	}
+}
+
+func (s basicNegotiatedSerializer) DecoderToVersion(decoder runtime.Decoder, gv runtime.GroupVersioner) runtime.Decoder {
+	return decoder
+}
+
+type unstructuredCreater struct {
+	nested runtime.ObjectCreater
+}
+
+func (c unstructuredCreater) New(kind schema.GroupVersionKind) (runtime.Object, error) {
+	out, err := c.nested.New(kind)
+	if err == nil {
+		return out, nil
+	}
+	out = &unstructured.Unstructured{}
+	out.GetObjectKind().SetGroupVersionKind(kind)
+	return out, nil
+}
+
+type unstructuredTyper struct {
+	nested runtime.ObjectTyper
+}
+
+func (t unstructuredTyper) ObjectKinds(obj runtime.Object) ([]schema.GroupVersionKind, bool, error) {
+	kinds, unversioned, err := t.nested.ObjectKinds(obj)
+	if err == nil {
+		return kinds, unversioned, nil
+	}
+	if _, ok := obj.(runtime.Unstructured); ok && !obj.GetObjectKind().GroupVersionKind().Empty() {
+		return []schema.GroupVersionKind{obj.GetObjectKind().GroupVersionKind()}, false, nil
+	}
+	return nil, false, err
+}
+
+func (t unstructuredTyper) Recognizes(gvk schema.GroupVersionKind) bool {
+	return true
+}
diff --git a/vendor/k8s.io/client-go/dynamic/simple.go b/vendor/k8s.io/client-go/dynamic/simple.go
new file mode 100644
index 00000000..9ae320d3
--- /dev/null
+++ b/vendor/k8s.io/client-go/dynamic/simple.go
@@ -0,0 +1,327 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package dynamic
+
+import (
+	"context"
+	"fmt"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/watch"
+	"k8s.io/client-go/rest"
+)
+
+type dynamicClient struct {
+	client *rest.RESTClient
+}
+
+var _ Interface = &dynamicClient{}
+
+// ConfigFor returns a copy of the provided config with the
+// appropriate dynamic client defaults set.
+func ConfigFor(inConfig *rest.Config) *rest.Config {
+	config := rest.CopyConfig(inConfig)
+	config.AcceptContentTypes = "application/json"
+	config.ContentType = "application/json"
+	config.NegotiatedSerializer = basicNegotiatedSerializer{} // this gets used for discovery and error handling types
+	if config.UserAgent == "" {
+		config.UserAgent = rest.DefaultKubernetesUserAgent()
+	}
+	return config
+}
+
+// NewForConfigOrDie creates a new Interface for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) Interface {
+	ret, err := NewForConfig(c)
+	if err != nil {
+		panic(err)
+	}
+	return ret
+}
+
+// NewForConfig creates a new dynamic client or returns an error.
+func NewForConfig(inConfig *rest.Config) (Interface, error) {
+	config := ConfigFor(inConfig)
+	// for serializing the options
+	config.GroupVersion = &schema.GroupVersion{}
+	config.APIPath = "/if-you-see-this-search-for-the-break"
+
+	restClient, err := rest.RESTClientFor(config)
+	if err != nil {
+		return nil, err
+	}
+
+	return &dynamicClient{client: restClient}, nil
+}
+
+type dynamicResourceClient struct {
+	client    *dynamicClient
+	namespace string
+	resource  schema.GroupVersionResource
+}
+
+func (c *dynamicClient) Resource(resource schema.GroupVersionResource) NamespaceableResourceInterface {
+	return &dynamicResourceClient{client: c, resource: resource}
+}
+
+func (c *dynamicResourceClient) Namespace(ns string) ResourceInterface {
+	ret := *c
+	ret.namespace = ns
+	return &ret
+}
+
+func (c *dynamicResourceClient) Create(ctx context.Context, obj *unstructured.Unstructured, opts metav1.CreateOptions, subresources ...string) (*unstructured.Unstructured, error) {
+	outBytes, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj)
+	if err != nil {
+		return nil, err
+	}
+	name := ""
+	if len(subresources) > 0 {
+		accessor, err := meta.Accessor(obj)
+		if err != nil {
+			return nil, err
+		}
+		name = accessor.GetName()
+		if len(name) == 0 {
+			return nil, fmt.Errorf("name is required")
+		}
+	}
+
+	result := c.client.client.
+		Post().
+		AbsPath(append(c.makeURLSegments(name), subresources...)...).
+		Body(outBytes).
+		SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).
+		Do(ctx)
+	if err := result.Error(); err != nil {
+		return nil, err
+	}
+
+	retBytes, err := result.Raw()
+	if err != nil {
+		return nil, err
+	}
+	uncastObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, retBytes)
+	if err != nil {
+		return nil, err
+	}
+	return uncastObj.(*unstructured.Unstructured), nil
+}
+
+func (c *dynamicResourceClient) Update(ctx context.Context, obj *unstructured.Unstructured, opts metav1.UpdateOptions, subresources ...string) (*unstructured.Unstructured, error) {
+	accessor, err := meta.Accessor(obj)
+	if err != nil {
+		return nil, err
+	}
+	name := accessor.GetName()
+	if len(name) == 0 {
+		return nil, fmt.Errorf("name is required")
+	}
+	outBytes, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj)
+	if err != nil {
+		return nil, err
+	}
+
+	result := c.client.client.
+		Put().
+		AbsPath(append(c.makeURLSegments(name), subresources...)...).
+		Body(outBytes).
+		SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).
+		Do(ctx)
+	if err := result.Error(); err != nil {
+		return nil, err
+	}
+
+	retBytes, err := result.Raw()
+	if err != nil {
+		return nil, err
+	}
+	uncastObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, retBytes)
+	if err != nil {
+		return nil, err
+	}
+	return uncastObj.(*unstructured.Unstructured), nil
+}
+
+func (c *dynamicResourceClient) UpdateStatus(ctx context.Context, obj *unstructured.Unstructured, opts metav1.UpdateOptions) (*unstructured.Unstructured, error) {
+	accessor, err := meta.Accessor(obj)
+	if err != nil {
+		return nil, err
+	}
+	name := accessor.GetName()
+	if len(name) == 0 {
+		return nil, fmt.Errorf("name is required")
+	}
+
+	outBytes, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj)
+	if err != nil {
+		return nil, err
+	}
+
+	result := c.client.client.
+		Put().
+		AbsPath(append(c.makeURLSegments(name), "status")...).
+		Body(outBytes).
+		SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).
+		Do(ctx)
+	if err := result.Error(); err != nil {
+		return nil, err
+	}
+
+	retBytes, err := result.Raw()
+	if err != nil {
+		return nil, err
+	}
+	uncastObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, retBytes)
+	if err != nil {
+		return nil, err
+	}
+	return uncastObj.(*unstructured.Unstructured), nil
+}
+
+func (c *dynamicResourceClient) Delete(ctx context.Context, name string, opts metav1.DeleteOptions, subresources ...string) error {
+	if len(name) == 0 {
+		return fmt.Errorf("name is required")
+	}
+	deleteOptionsByte, err := runtime.Encode(deleteOptionsCodec.LegacyCodec(schema.GroupVersion{Version: "v1"}), &opts)
+	if err != nil {
+		return err
+	}
+
+	result := c.client.client.
+		Delete().
+		AbsPath(append(c.makeURLSegments(name), subresources...)...).
+		Body(deleteOptionsByte).
+		Do(ctx)
+	return result.Error()
+}
+
+func (c *dynamicResourceClient) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+	deleteOptionsByte, err := runtime.Encode(deleteOptionsCodec.LegacyCodec(schema.GroupVersion{Version: "v1"}), &opts)
+	if err != nil {
+		return err
+	}
+
+	result := c.client.client.
+		Delete().
+		AbsPath(c.makeURLSegments("")...).
+		Body(deleteOptionsByte).
+		SpecificallyVersionedParams(&listOptions, dynamicParameterCodec, versionV1).
+		Do(ctx)
+	return result.Error()
+}
+
+func (c *dynamicResourceClient) Get(ctx context.Context, name string, opts metav1.GetOptions, subresources ...string) (*unstructured.Unstructured, error) {
+	if len(name) == 0 {
+		return nil, fmt.Errorf("name is required")
+	}
+	result := c.client.client.Get().AbsPath(append(c.makeURLSegments(name), subresources...)...).SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).Do(ctx)
+	if err := result.Error(); err != nil {
+		return nil, err
+	}
+	retBytes, err := result.Raw()
+	if err != nil {
+		return nil, err
+	}
+	uncastObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, retBytes)
+	if err != nil {
+		return nil, err
+	}
+	return uncastObj.(*unstructured.Unstructured), nil
+}
+
+func (c *dynamicResourceClient) List(ctx context.Context, opts metav1.ListOptions) (*unstructured.UnstructuredList, error) {
+	result := c.client.client.Get().AbsPath(c.makeURLSegments("")...).SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).Do(ctx)
+	if err := result.Error(); err != nil {
+		return nil, err
+	}
+	retBytes, err := result.Raw()
+	if err != nil {
+		return nil, err
+	}
+	uncastObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, retBytes)
+	if err != nil {
+		return nil, err
+	}
+	if list, ok := uncastObj.(*unstructured.UnstructuredList); ok {
+		return list, nil
+	}
+
+	list, err := uncastObj.(*unstructured.Unstructured).ToList()
+	if err != nil {
+		return nil, err
+	}
+	return list, nil
+}
+
+func (c *dynamicResourceClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+	opts.Watch = true
+	return c.client.client.Get().AbsPath(c.makeURLSegments("")...).
+		SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).
+		Watch(ctx)
+}
+
+func (c *dynamicResourceClient) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (*unstructured.Unstructured, error) {
+	if len(name) == 0 {
+		return nil, fmt.Errorf("name is required")
+	}
+	result := c.client.client.
+		Patch(pt).
+		AbsPath(append(c.makeURLSegments(name), subresources...)...).
+		Body(data).
+		SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).
+		Do(ctx)
+	if err := result.Error(); err != nil {
+		return nil, err
+	}
+	retBytes, err := result.Raw()
+	if err != nil {
+		return nil, err
+	}
+	uncastObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, retBytes)
+	if err != nil {
+		return nil, err
+	}
+	return uncastObj.(*unstructured.Unstructured), nil
+}
+
+func (c *dynamicResourceClient) makeURLSegments(name string) []string {
+	url := []string{}
+	if len(c.resource.Group) == 0 {
+		url = append(url, "api")
+	} else {
+		url = append(url, "apis", c.resource.Group)
+	}
+	url = append(url, c.resource.Version)
+
+	if len(c.namespace) > 0 {
+		url = append(url, "namespaces", c.namespace)
+	}
+	url = append(url, c.resource.Resource)
+
+	if len(name) > 0 {
+		url = append(url, name)
+	}
+
+	return url
+}
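makeURLSegments assembles /api or /apis, then the group (if any), version, optional namespace, resource, and optional name. The sketch below exercises that path through the public surface with a merge patch against a namespaced apps/v1 resource; the GVR, patch body, and helper name are illustrative assumptions.

package example // illustrative dynamic Patch call over the path makeURLSegments builds

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/dynamic"
)

// scaleDeployment merge-patches spec.replicas on a Deployment; the request goes to
// PATCH /apis/apps/v1/namespaces/<namespace>/deployments/<name>.
func scaleDeployment(ctx context.Context, client dynamic.Interface, namespace, name string, replicas int) (*unstructured.Unstructured, error) {
	gvr := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}
	patch := []byte(fmt.Sprintf(`{"spec":{"replicas":%d}}`, replicas))
	return client.Resource(gvr).Namespace(namespace).Patch(ctx, name, types.MergePatchType, patch, metav1.PatchOptions{})
}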
diff --git a/vendor/k8s.io/client-go/kubernetes/clientset.go b/vendor/k8s.io/client-go/kubernetes/clientset.go
index cf98b050..d76e9ac9 100644
--- a/vendor/k8s.io/client-go/kubernetes/clientset.go
+++ b/vendor/k8s.io/client-go/kubernetes/clientset.go
@@ -371,7 +371,7 @@ func NewForConfig(c *rest.Config) (*Clientset, error) {
 	configShallowCopy := *c
 	if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
 		if configShallowCopy.Burst <= 0 {
-			return nil, fmt.Errorf("Burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0")
+			return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0")
 		}
 		configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
 	}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go
index 1f5e5e38..cf458f48 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/mutatingwebhookconfiguration.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1
 
 import (
+	"context"
 	"time"
 
 	v1 "k8s.io/api/admissionregistration/v1"
@@ -37,14 +38,14 @@ type MutatingWebhookConfigurationsGetter interface {
 
 // MutatingWebhookConfigurationInterface has methods to work with MutatingWebhookConfiguration resources.
 type MutatingWebhookConfigurationInterface interface {
-	Create(*v1.MutatingWebhookConfiguration) (*v1.MutatingWebhookConfiguration, error)
-	Update(*v1.MutatingWebhookConfiguration) (*v1.MutatingWebhookConfiguration, error)
-	Delete(name string, options *metav1.DeleteOptions) error
-	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
-	Get(name string, options metav1.GetOptions) (*v1.MutatingWebhookConfiguration, error)
-	List(opts metav1.ListOptions) (*v1.MutatingWebhookConfigurationList, error)
-	Watch(opts metav1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.MutatingWebhookConfiguration, err error)
+	Create(ctx context.Context, mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration, opts metav1.CreateOptions) (*v1.MutatingWebhookConfiguration, error)
+	Update(ctx context.Context, mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration, opts metav1.UpdateOptions) (*v1.MutatingWebhookConfiguration, error)
+	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.MutatingWebhookConfiguration, error)
+	List(ctx context.Context, opts metav1.ListOptions) (*v1.MutatingWebhookConfigurationList, error)
+	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.MutatingWebhookConfiguration, err error)
 	MutatingWebhookConfigurationExpansion
 }
 
@@ -61,19 +62,19 @@ func newMutatingWebhookConfigurations(c *AdmissionregistrationV1Client) *mutatin
 }
 
 // Get takes name of the mutatingWebhookConfiguration, and returns the corresponding mutatingWebhookConfiguration object, and an error if there is any.
-func (c *mutatingWebhookConfigurations) Get(name string, options metav1.GetOptions) (result *v1.MutatingWebhookConfiguration, err error) {
+func (c *mutatingWebhookConfigurations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.MutatingWebhookConfiguration, err error) {
 	result = &v1.MutatingWebhookConfiguration{}
 	err = c.client.Get().
 		Resource("mutatingwebhookconfigurations").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors.
-func (c *mutatingWebhookConfigurations) List(opts metav1.ListOptions) (result *v1.MutatingWebhookConfigurationList, err error) {
+func (c *mutatingWebhookConfigurations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.MutatingWebhookConfigurationList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -83,13 +84,13 @@ func (c *mutatingWebhookConfigurations) List(opts metav1.ListOptions) (result *v
 		Resource("mutatingwebhookconfigurations").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations.
-func (c *mutatingWebhookConfigurations) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+func (c *mutatingWebhookConfigurations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -99,66 +100,69 @@ func (c *mutatingWebhookConfigurations) Watch(opts metav1.ListOptions) (watch.In
 		Resource("mutatingwebhookconfigurations").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a mutatingWebhookConfiguration and creates it.  Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any.
-func (c *mutatingWebhookConfigurations) Create(mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration) (result *v1.MutatingWebhookConfiguration, err error) {
+func (c *mutatingWebhookConfigurations) Create(ctx context.Context, mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration, opts metav1.CreateOptions) (result *v1.MutatingWebhookConfiguration, err error) {
 	result = &v1.MutatingWebhookConfiguration{}
 	err = c.client.Post().
 		Resource("mutatingwebhookconfigurations").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(mutatingWebhookConfiguration).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a mutatingWebhookConfiguration and updates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any.
-func (c *mutatingWebhookConfigurations) Update(mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration) (result *v1.MutatingWebhookConfiguration, err error) {
+func (c *mutatingWebhookConfigurations) Update(ctx context.Context, mutatingWebhookConfiguration *v1.MutatingWebhookConfiguration, opts metav1.UpdateOptions) (result *v1.MutatingWebhookConfiguration, err error) {
 	result = &v1.MutatingWebhookConfiguration{}
 	err = c.client.Put().
 		Resource("mutatingwebhookconfigurations").
 		Name(mutatingWebhookConfiguration.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(mutatingWebhookConfiguration).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the mutatingWebhookConfiguration and deletes it. Returns an error if one occurs.
-func (c *mutatingWebhookConfigurations) Delete(name string, options *metav1.DeleteOptions) error {
+func (c *mutatingWebhookConfigurations) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
 	return c.client.Delete().
 		Resource("mutatingwebhookconfigurations").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *mutatingWebhookConfigurations) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+func (c *mutatingWebhookConfigurations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Resource("mutatingwebhookconfigurations").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched mutatingWebhookConfiguration.
-func (c *mutatingWebhookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.MutatingWebhookConfiguration, err error) {
+func (c *mutatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.MutatingWebhookConfiguration, err error) {
 	result = &v1.MutatingWebhookConfiguration{}
 	err = c.client.Patch(pt).
 		Resource("mutatingwebhookconfigurations").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
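The same context-first, explicit-options shape applies across the generated typed clients changed below. A hypothetical caller of the updated v1 admissionregistration client (the clientset variable and configuration name are assumptions) would now write:

package example // illustrative call against the context-aware typed client

import (
	"context"

	admissionv1 "k8s.io/api/admissionregistration/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// getWebhookConfig fetches a MutatingWebhookConfiguration; compared with the old
// Get(name, options) signature, the call now takes a context and explicit GetOptions.
func getWebhookConfig(ctx context.Context, cs kubernetes.Interface, name string) (*admissionv1.MutatingWebhookConfiguration, error) {
	return cs.AdmissionregistrationV1().MutatingWebhookConfigurations().Get(ctx, name, metav1.GetOptions{})
}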
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go
index 7987b6e3..c7191c0f 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingwebhookconfiguration.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1
 
 import (
+	"context"
 	"time"
 
 	v1 "k8s.io/api/admissionregistration/v1"
@@ -37,14 +38,14 @@ type ValidatingWebhookConfigurationsGetter interface {
 
 // ValidatingWebhookConfigurationInterface has methods to work with ValidatingWebhookConfiguration resources.
 type ValidatingWebhookConfigurationInterface interface {
-	Create(*v1.ValidatingWebhookConfiguration) (*v1.ValidatingWebhookConfiguration, error)
-	Update(*v1.ValidatingWebhookConfiguration) (*v1.ValidatingWebhookConfiguration, error)
-	Delete(name string, options *metav1.DeleteOptions) error
-	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
-	Get(name string, options metav1.GetOptions) (*v1.ValidatingWebhookConfiguration, error)
-	List(opts metav1.ListOptions) (*v1.ValidatingWebhookConfigurationList, error)
-	Watch(opts metav1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ValidatingWebhookConfiguration, err error)
+	Create(ctx context.Context, validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration, opts metav1.CreateOptions) (*v1.ValidatingWebhookConfiguration, error)
+	Update(ctx context.Context, validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration, opts metav1.UpdateOptions) (*v1.ValidatingWebhookConfiguration, error)
+	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ValidatingWebhookConfiguration, error)
+	List(ctx context.Context, opts metav1.ListOptions) (*v1.ValidatingWebhookConfigurationList, error)
+	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ValidatingWebhookConfiguration, err error)
 	ValidatingWebhookConfigurationExpansion
 }
 
@@ -61,19 +62,19 @@ func newValidatingWebhookConfigurations(c *AdmissionregistrationV1Client) *valid
 }
 
 // Get takes name of the validatingWebhookConfiguration, and returns the corresponding validatingWebhookConfiguration object, and an error if there is any.
-func (c *validatingWebhookConfigurations) Get(name string, options metav1.GetOptions) (result *v1.ValidatingWebhookConfiguration, err error) {
+func (c *validatingWebhookConfigurations) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ValidatingWebhookConfiguration, err error) {
 	result = &v1.ValidatingWebhookConfiguration{}
 	err = c.client.Get().
 		Resource("validatingwebhookconfigurations").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors.
-func (c *validatingWebhookConfigurations) List(opts metav1.ListOptions) (result *v1.ValidatingWebhookConfigurationList, err error) {
+func (c *validatingWebhookConfigurations) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ValidatingWebhookConfigurationList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -83,13 +84,13 @@ func (c *validatingWebhookConfigurations) List(opts metav1.ListOptions) (result
 		Resource("validatingwebhookconfigurations").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations.
-func (c *validatingWebhookConfigurations) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+func (c *validatingWebhookConfigurations) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -99,66 +100,69 @@ func (c *validatingWebhookConfigurations) Watch(opts metav1.ListOptions) (watch.
 		Resource("validatingwebhookconfigurations").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a validatingWebhookConfiguration and creates it.  Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any.
-func (c *validatingWebhookConfigurations) Create(validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration) (result *v1.ValidatingWebhookConfiguration, err error) {
+func (c *validatingWebhookConfigurations) Create(ctx context.Context, validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration, opts metav1.CreateOptions) (result *v1.ValidatingWebhookConfiguration, err error) {
 	result = &v1.ValidatingWebhookConfiguration{}
 	err = c.client.Post().
 		Resource("validatingwebhookconfigurations").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(validatingWebhookConfiguration).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a validatingWebhookConfiguration and updates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any.
-func (c *validatingWebhookConfigurations) Update(validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration) (result *v1.ValidatingWebhookConfiguration, err error) {
+func (c *validatingWebhookConfigurations) Update(ctx context.Context, validatingWebhookConfiguration *v1.ValidatingWebhookConfiguration, opts metav1.UpdateOptions) (result *v1.ValidatingWebhookConfiguration, err error) {
 	result = &v1.ValidatingWebhookConfiguration{}
 	err = c.client.Put().
 		Resource("validatingwebhookconfigurations").
 		Name(validatingWebhookConfiguration.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(validatingWebhookConfiguration).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the validatingWebhookConfiguration and deletes it. Returns an error if one occurs.
-func (c *validatingWebhookConfigurations) Delete(name string, options *metav1.DeleteOptions) error {
+func (c *validatingWebhookConfigurations) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
 	return c.client.Delete().
 		Resource("validatingwebhookconfigurations").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *validatingWebhookConfigurations) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+func (c *validatingWebhookConfigurations) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Resource("validatingwebhookconfigurations").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched validatingWebhookConfiguration.
-func (c *validatingWebhookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ValidatingWebhookConfiguration, err error) {
+func (c *validatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ValidatingWebhookConfiguration, err error) {
 	result = &v1.ValidatingWebhookConfiguration{}
 	err = c.client.Patch(pt).
 		Resource("validatingwebhookconfigurations").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go
index 4524896c..73ab9ecd 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1beta1
 
 import (
+	"context"
 	"time"
 
 	v1beta1 "k8s.io/api/admissionregistration/v1beta1"
@@ -37,14 +38,14 @@ type MutatingWebhookConfigurationsGetter interface {
 
 // MutatingWebhookConfigurationInterface has methods to work with MutatingWebhookConfiguration resources.
 type MutatingWebhookConfigurationInterface interface {
-	Create(*v1beta1.MutatingWebhookConfiguration) (*v1beta1.MutatingWebhookConfiguration, error)
-	Update(*v1beta1.MutatingWebhookConfiguration) (*v1beta1.MutatingWebhookConfiguration, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1beta1.MutatingWebhookConfiguration, error)
-	List(opts v1.ListOptions) (*v1beta1.MutatingWebhookConfigurationList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.MutatingWebhookConfiguration, err error)
+	Create(ctx context.Context, mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration, opts v1.CreateOptions) (*v1beta1.MutatingWebhookConfiguration, error)
+	Update(ctx context.Context, mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration, opts v1.UpdateOptions) (*v1beta1.MutatingWebhookConfiguration, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.MutatingWebhookConfiguration, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1beta1.MutatingWebhookConfigurationList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.MutatingWebhookConfiguration, err error)
 	MutatingWebhookConfigurationExpansion
 }
 
@@ -61,19 +62,19 @@ func newMutatingWebhookConfigurations(c *AdmissionregistrationV1beta1Client) *mu
 }
 
 // Get takes name of the mutatingWebhookConfiguration, and returns the corresponding mutatingWebhookConfiguration object, and an error if there is any.
-func (c *mutatingWebhookConfigurations) Get(name string, options v1.GetOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) {
+func (c *mutatingWebhookConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) {
 	result = &v1beta1.MutatingWebhookConfiguration{}
 	err = c.client.Get().
 		Resource("mutatingwebhookconfigurations").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors.
-func (c *mutatingWebhookConfigurations) List(opts v1.ListOptions) (result *v1beta1.MutatingWebhookConfigurationList, err error) {
+func (c *mutatingWebhookConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.MutatingWebhookConfigurationList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -83,13 +84,13 @@ func (c *mutatingWebhookConfigurations) List(opts v1.ListOptions) (result *v1bet
 		Resource("mutatingwebhookconfigurations").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations.
-func (c *mutatingWebhookConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *mutatingWebhookConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -99,66 +100,69 @@ func (c *mutatingWebhookConfigurations) Watch(opts v1.ListOptions) (watch.Interf
 		Resource("mutatingwebhookconfigurations").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a mutatingWebhookConfiguration and creates it.  Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any.
-func (c *mutatingWebhookConfigurations) Create(mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration) (result *v1beta1.MutatingWebhookConfiguration, err error) {
+func (c *mutatingWebhookConfigurations) Create(ctx context.Context, mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration, opts v1.CreateOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) {
 	result = &v1beta1.MutatingWebhookConfiguration{}
 	err = c.client.Post().
 		Resource("mutatingwebhookconfigurations").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(mutatingWebhookConfiguration).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a mutatingWebhookConfiguration and updates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any.
-func (c *mutatingWebhookConfigurations) Update(mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration) (result *v1beta1.MutatingWebhookConfiguration, err error) {
+func (c *mutatingWebhookConfigurations) Update(ctx context.Context, mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration, opts v1.UpdateOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) {
 	result = &v1beta1.MutatingWebhookConfiguration{}
 	err = c.client.Put().
 		Resource("mutatingwebhookconfigurations").
 		Name(mutatingWebhookConfiguration.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(mutatingWebhookConfiguration).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the mutatingWebhookConfiguration and deletes it. Returns an error if one occurs.
-func (c *mutatingWebhookConfigurations) Delete(name string, options *v1.DeleteOptions) error {
+func (c *mutatingWebhookConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Resource("mutatingwebhookconfigurations").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *mutatingWebhookConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *mutatingWebhookConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Resource("mutatingwebhookconfigurations").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched mutatingWebhookConfiguration.
-func (c *mutatingWebhookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.MutatingWebhookConfiguration, err error) {
+func (c *mutatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.MutatingWebhookConfiguration, err error) {
 	result = &v1beta1.MutatingWebhookConfiguration{}
 	err = c.client.Patch(pt).
 		Resource("mutatingwebhookconfigurations").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go
index 7e711b30..5ab0b9e3 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1beta1
 
 import (
+	"context"
 	"time"
 
 	v1beta1 "k8s.io/api/admissionregistration/v1beta1"
@@ -37,14 +38,14 @@ type ValidatingWebhookConfigurationsGetter interface {
 
 // ValidatingWebhookConfigurationInterface has methods to work with ValidatingWebhookConfiguration resources.
 type ValidatingWebhookConfigurationInterface interface {
-	Create(*v1beta1.ValidatingWebhookConfiguration) (*v1beta1.ValidatingWebhookConfiguration, error)
-	Update(*v1beta1.ValidatingWebhookConfiguration) (*v1beta1.ValidatingWebhookConfiguration, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1beta1.ValidatingWebhookConfiguration, error)
-	List(opts v1.ListOptions) (*v1beta1.ValidatingWebhookConfigurationList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ValidatingWebhookConfiguration, err error)
+	Create(ctx context.Context, validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration, opts v1.CreateOptions) (*v1beta1.ValidatingWebhookConfiguration, error)
+	Update(ctx context.Context, validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration, opts v1.UpdateOptions) (*v1beta1.ValidatingWebhookConfiguration, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ValidatingWebhookConfiguration, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ValidatingWebhookConfigurationList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingWebhookConfiguration, err error)
 	ValidatingWebhookConfigurationExpansion
 }
 
@@ -61,19 +62,19 @@ func newValidatingWebhookConfigurations(c *AdmissionregistrationV1beta1Client) *
 }
 
 // Get takes name of the validatingWebhookConfiguration, and returns the corresponding validatingWebhookConfiguration object, and an error if there is any.
-func (c *validatingWebhookConfigurations) Get(name string, options v1.GetOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) {
+func (c *validatingWebhookConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) {
 	result = &v1beta1.ValidatingWebhookConfiguration{}
 	err = c.client.Get().
 		Resource("validatingwebhookconfigurations").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors.
-func (c *validatingWebhookConfigurations) List(opts v1.ListOptions) (result *v1beta1.ValidatingWebhookConfigurationList, err error) {
+func (c *validatingWebhookConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ValidatingWebhookConfigurationList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -83,13 +84,13 @@ func (c *validatingWebhookConfigurations) List(opts v1.ListOptions) (result *v1b
 		Resource("validatingwebhookconfigurations").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations.
-func (c *validatingWebhookConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *validatingWebhookConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -99,66 +100,69 @@ func (c *validatingWebhookConfigurations) Watch(opts v1.ListOptions) (watch.Inte
 		Resource("validatingwebhookconfigurations").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a validatingWebhookConfiguration and creates it.  Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any.
-func (c *validatingWebhookConfigurations) Create(validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration) (result *v1beta1.ValidatingWebhookConfiguration, err error) {
+func (c *validatingWebhookConfigurations) Create(ctx context.Context, validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration, opts v1.CreateOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) {
 	result = &v1beta1.ValidatingWebhookConfiguration{}
 	err = c.client.Post().
 		Resource("validatingwebhookconfigurations").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(validatingWebhookConfiguration).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a validatingWebhookConfiguration and updates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any.
-func (c *validatingWebhookConfigurations) Update(validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration) (result *v1beta1.ValidatingWebhookConfiguration, err error) {
+func (c *validatingWebhookConfigurations) Update(ctx context.Context, validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration, opts v1.UpdateOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) {
 	result = &v1beta1.ValidatingWebhookConfiguration{}
 	err = c.client.Put().
 		Resource("validatingwebhookconfigurations").
 		Name(validatingWebhookConfiguration.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(validatingWebhookConfiguration).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the validatingWebhookConfiguration and deletes it. Returns an error if one occurs.
-func (c *validatingWebhookConfigurations) Delete(name string, options *v1.DeleteOptions) error {
+func (c *validatingWebhookConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Resource("validatingwebhookconfigurations").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *validatingWebhookConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *validatingWebhookConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Resource("validatingwebhookconfigurations").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched validatingWebhookConfiguration.
-func (c *validatingWebhookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ValidatingWebhookConfiguration, err error) {
+func (c *validatingWebhookConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ValidatingWebhookConfiguration, err error) {
 	result = &v1beta1.ValidatingWebhookConfiguration{}
 	err = c.client.Patch(pt).
 		Resource("validatingwebhookconfigurations").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go
index e28e4d2a..dba06207 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1
 
 import (
+	"context"
 	"time"
 
 	v1 "k8s.io/api/apps/v1"
@@ -37,14 +38,14 @@ type ControllerRevisionsGetter interface {
 
 // ControllerRevisionInterface has methods to work with ControllerRevision resources.
 type ControllerRevisionInterface interface {
-	Create(*v1.ControllerRevision) (*v1.ControllerRevision, error)
-	Update(*v1.ControllerRevision) (*v1.ControllerRevision, error)
-	Delete(name string, options *metav1.DeleteOptions) error
-	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
-	Get(name string, options metav1.GetOptions) (*v1.ControllerRevision, error)
-	List(opts metav1.ListOptions) (*v1.ControllerRevisionList, error)
-	Watch(opts metav1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ControllerRevision, err error)
+	Create(ctx context.Context, controllerRevision *v1.ControllerRevision, opts metav1.CreateOptions) (*v1.ControllerRevision, error)
+	Update(ctx context.Context, controllerRevision *v1.ControllerRevision, opts metav1.UpdateOptions) (*v1.ControllerRevision, error)
+	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ControllerRevision, error)
+	List(ctx context.Context, opts metav1.ListOptions) (*v1.ControllerRevisionList, error)
+	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ControllerRevision, err error)
 	ControllerRevisionExpansion
 }
 
@@ -63,20 +64,20 @@ func newControllerRevisions(c *AppsV1Client, namespace string) *controllerRevisi
 }
 
 // Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any.
-func (c *controllerRevisions) Get(name string, options metav1.GetOptions) (result *v1.ControllerRevision, err error) {
+func (c *controllerRevisions) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ControllerRevision, err error) {
 	result = &v1.ControllerRevision{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("controllerrevisions").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors.
-func (c *controllerRevisions) List(opts metav1.ListOptions) (result *v1.ControllerRevisionList, err error) {
+func (c *controllerRevisions) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ControllerRevisionList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -87,13 +88,13 @@ func (c *controllerRevisions) List(opts metav1.ListOptions) (result *v1.Controll
 		Resource("controllerrevisions").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested controllerRevisions.
-func (c *controllerRevisions) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+func (c *controllerRevisions) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -104,71 +105,74 @@ func (c *controllerRevisions) Watch(opts metav1.ListOptions) (watch.Interface, e
 		Resource("controllerrevisions").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a controllerRevision and creates it.  Returns the server's representation of the controllerRevision, and an error, if there is any.
-func (c *controllerRevisions) Create(controllerRevision *v1.ControllerRevision) (result *v1.ControllerRevision, err error) {
+func (c *controllerRevisions) Create(ctx context.Context, controllerRevision *v1.ControllerRevision, opts metav1.CreateOptions) (result *v1.ControllerRevision, err error) {
 	result = &v1.ControllerRevision{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("controllerrevisions").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(controllerRevision).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any.
-func (c *controllerRevisions) Update(controllerRevision *v1.ControllerRevision) (result *v1.ControllerRevision, err error) {
+func (c *controllerRevisions) Update(ctx context.Context, controllerRevision *v1.ControllerRevision, opts metav1.UpdateOptions) (result *v1.ControllerRevision, err error) {
 	result = &v1.ControllerRevision{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("controllerrevisions").
 		Name(controllerRevision.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(controllerRevision).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs.
-func (c *controllerRevisions) Delete(name string, options *metav1.DeleteOptions) error {
+func (c *controllerRevisions) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("controllerrevisions").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *controllerRevisions) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+func (c *controllerRevisions) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("controllerrevisions").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched controllerRevision.
-func (c *controllerRevisions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ControllerRevision, err error) {
+func (c *controllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ControllerRevision, err error) {
 	result = &v1.ControllerRevision{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("controllerrevisions").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go
index a535cdab..0bb397af 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1
 
 import (
+	"context"
 	"time"
 
 	v1 "k8s.io/api/apps/v1"
@@ -37,15 +38,15 @@ type DaemonSetsGetter interface {
 
 // DaemonSetInterface has methods to work with DaemonSet resources.
 type DaemonSetInterface interface {
-	Create(*v1.DaemonSet) (*v1.DaemonSet, error)
-	Update(*v1.DaemonSet) (*v1.DaemonSet, error)
-	UpdateStatus(*v1.DaemonSet) (*v1.DaemonSet, error)
-	Delete(name string, options *metav1.DeleteOptions) error
-	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
-	Get(name string, options metav1.GetOptions) (*v1.DaemonSet, error)
-	List(opts metav1.ListOptions) (*v1.DaemonSetList, error)
-	Watch(opts metav1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.DaemonSet, err error)
+	Create(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.CreateOptions) (*v1.DaemonSet, error)
+	Update(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (*v1.DaemonSet, error)
+	UpdateStatus(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (*v1.DaemonSet, error)
+	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.DaemonSet, error)
+	List(ctx context.Context, opts metav1.ListOptions) (*v1.DaemonSetList, error)
+	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.DaemonSet, err error)
 	DaemonSetExpansion
 }
 
@@ -64,20 +65,20 @@ func newDaemonSets(c *AppsV1Client, namespace string) *daemonSets {
 }
 
 // Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any.
-func (c *daemonSets) Get(name string, options metav1.GetOptions) (result *v1.DaemonSet, err error) {
+func (c *daemonSets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.DaemonSet, err error) {
 	result = &v1.DaemonSet{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("daemonsets").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of DaemonSets that match those selectors.
-func (c *daemonSets) List(opts metav1.ListOptions) (result *v1.DaemonSetList, err error) {
+func (c *daemonSets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.DaemonSetList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -88,13 +89,13 @@ func (c *daemonSets) List(opts metav1.ListOptions) (result *v1.DaemonSetList, er
 		Resource("daemonsets").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested daemonSets.
-func (c *daemonSets) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+func (c *daemonSets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -105,87 +106,90 @@ func (c *daemonSets) Watch(opts metav1.ListOptions) (watch.Interface, error) {
 		Resource("daemonsets").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a daemonSet and creates it.  Returns the server's representation of the daemonSet, and an error, if there is any.
-func (c *daemonSets) Create(daemonSet *v1.DaemonSet) (result *v1.DaemonSet, err error) {
+func (c *daemonSets) Create(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.CreateOptions) (result *v1.DaemonSet, err error) {
 	result = &v1.DaemonSet{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("daemonsets").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(daemonSet).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any.
-func (c *daemonSets) Update(daemonSet *v1.DaemonSet) (result *v1.DaemonSet, err error) {
+func (c *daemonSets) Update(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (result *v1.DaemonSet, err error) {
 	result = &v1.DaemonSet{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("daemonsets").
 		Name(daemonSet.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(daemonSet).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // UpdateStatus was generated because the type contains a Status member.
 // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-
-func (c *daemonSets) UpdateStatus(daemonSet *v1.DaemonSet) (result *v1.DaemonSet, err error) {
+func (c *daemonSets) UpdateStatus(ctx context.Context, daemonSet *v1.DaemonSet, opts metav1.UpdateOptions) (result *v1.DaemonSet, err error) {
 	result = &v1.DaemonSet{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("daemonsets").
 		Name(daemonSet.Name).
 		SubResource("status").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(daemonSet).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the daemonSet and deletes it. Returns an error if one occurs.
-func (c *daemonSets) Delete(name string, options *metav1.DeleteOptions) error {
+func (c *daemonSets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("daemonsets").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *daemonSets) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+func (c *daemonSets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("daemonsets").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched daemonSet.
-func (c *daemonSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.DaemonSet, err error) {
+func (c *daemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.DaemonSet, err error) {
 	result = &v1.DaemonSet{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("daemonsets").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go
index f9799a45..69d1b86d 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1
 
 import (
+	"context"
 	"time"
 
 	v1 "k8s.io/api/apps/v1"
@@ -38,17 +39,17 @@ type DeploymentsGetter interface {
 
 // DeploymentInterface has methods to work with Deployment resources.
 type DeploymentInterface interface {
-	Create(*v1.Deployment) (*v1.Deployment, error)
-	Update(*v1.Deployment) (*v1.Deployment, error)
-	UpdateStatus(*v1.Deployment) (*v1.Deployment, error)
-	Delete(name string, options *metav1.DeleteOptions) error
-	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
-	Get(name string, options metav1.GetOptions) (*v1.Deployment, error)
-	List(opts metav1.ListOptions) (*v1.DeploymentList, error)
-	Watch(opts metav1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Deployment, err error)
-	GetScale(deploymentName string, options metav1.GetOptions) (*autoscalingv1.Scale, error)
-	UpdateScale(deploymentName string, scale *autoscalingv1.Scale) (*autoscalingv1.Scale, error)
+	Create(ctx context.Context, deployment *v1.Deployment, opts metav1.CreateOptions) (*v1.Deployment, error)
+	Update(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (*v1.Deployment, error)
+	UpdateStatus(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (*v1.Deployment, error)
+	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Deployment, error)
+	List(ctx context.Context, opts metav1.ListOptions) (*v1.DeploymentList, error)
+	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Deployment, err error)
+	GetScale(ctx context.Context, deploymentName string, options metav1.GetOptions) (*autoscalingv1.Scale, error)
+	UpdateScale(ctx context.Context, deploymentName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error)
 
 	DeploymentExpansion
 }
@@ -68,20 +69,20 @@ func newDeployments(c *AppsV1Client, namespace string) *deployments {
 }
 
 // Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any.
-func (c *deployments) Get(name string, options metav1.GetOptions) (result *v1.Deployment, err error) {
+func (c *deployments) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Deployment, err error) {
 	result = &v1.Deployment{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("deployments").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of Deployments that match those selectors.
-func (c *deployments) List(opts metav1.ListOptions) (result *v1.DeploymentList, err error) {
+func (c *deployments) List(ctx context.Context, opts metav1.ListOptions) (result *v1.DeploymentList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -92,13 +93,13 @@ func (c *deployments) List(opts metav1.ListOptions) (result *v1.DeploymentList,
 		Resource("deployments").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested deployments.
-func (c *deployments) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+func (c *deployments) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -109,93 +110,96 @@ func (c *deployments) Watch(opts metav1.ListOptions) (watch.Interface, error) {
 		Resource("deployments").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a deployment and creates it.  Returns the server's representation of the deployment, and an error, if there is any.
-func (c *deployments) Create(deployment *v1.Deployment) (result *v1.Deployment, err error) {
+func (c *deployments) Create(ctx context.Context, deployment *v1.Deployment, opts metav1.CreateOptions) (result *v1.Deployment, err error) {
 	result = &v1.Deployment{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("deployments").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(deployment).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any.
-func (c *deployments) Update(deployment *v1.Deployment) (result *v1.Deployment, err error) {
+func (c *deployments) Update(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (result *v1.Deployment, err error) {
 	result = &v1.Deployment{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("deployments").
 		Name(deployment.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(deployment).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // UpdateStatus was generated because the type contains a Status member.
 // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-
-func (c *deployments) UpdateStatus(deployment *v1.Deployment) (result *v1.Deployment, err error) {
+func (c *deployments) UpdateStatus(ctx context.Context, deployment *v1.Deployment, opts metav1.UpdateOptions) (result *v1.Deployment, err error) {
 	result = &v1.Deployment{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("deployments").
 		Name(deployment.Name).
 		SubResource("status").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(deployment).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the deployment and deletes it. Returns an error if one occurs.
-func (c *deployments) Delete(name string, options *metav1.DeleteOptions) error {
+func (c *deployments) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("deployments").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *deployments) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+func (c *deployments) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("deployments").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched deployment.
-func (c *deployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Deployment, err error) {
+func (c *deployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Deployment, err error) {
 	result = &v1.Deployment{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("deployments").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // GetScale takes name of the deployment, and returns the corresponding autoscalingv1.Scale object, and an error if there is any.
-func (c *deployments) GetScale(deploymentName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) {
+func (c *deployments) GetScale(ctx context.Context, deploymentName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) {
 	result = &autoscalingv1.Scale{}
 	err = c.client.Get().
 		Namespace(c.ns).
@@ -203,21 +207,22 @@ func (c *deployments) GetScale(deploymentName string, options metav1.GetOptions)
 		Name(deploymentName).
 		SubResource("scale").
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any.
-func (c *deployments) UpdateScale(deploymentName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) {
+func (c *deployments) UpdateScale(ctx context.Context, deploymentName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) {
 	result = &autoscalingv1.Scale{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("deployments").
 		Name(deploymentName).
 		SubResource("scale").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(scale).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go
index ff3504e7..377b9ca3 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1
 
 import (
+	"context"
 	"time"
 
 	v1 "k8s.io/api/apps/v1"
@@ -38,17 +39,17 @@ type ReplicaSetsGetter interface {
 
 // ReplicaSetInterface has methods to work with ReplicaSet resources.
 type ReplicaSetInterface interface {
-	Create(*v1.ReplicaSet) (*v1.ReplicaSet, error)
-	Update(*v1.ReplicaSet) (*v1.ReplicaSet, error)
-	UpdateStatus(*v1.ReplicaSet) (*v1.ReplicaSet, error)
-	Delete(name string, options *metav1.DeleteOptions) error
-	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
-	Get(name string, options metav1.GetOptions) (*v1.ReplicaSet, error)
-	List(opts metav1.ListOptions) (*v1.ReplicaSetList, error)
-	Watch(opts metav1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicaSet, err error)
-	GetScale(replicaSetName string, options metav1.GetOptions) (*autoscalingv1.Scale, error)
-	UpdateScale(replicaSetName string, scale *autoscalingv1.Scale) (*autoscalingv1.Scale, error)
+	Create(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.CreateOptions) (*v1.ReplicaSet, error)
+	Update(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (*v1.ReplicaSet, error)
+	UpdateStatus(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (*v1.ReplicaSet, error)
+	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ReplicaSet, error)
+	List(ctx context.Context, opts metav1.ListOptions) (*v1.ReplicaSetList, error)
+	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ReplicaSet, err error)
+	GetScale(ctx context.Context, replicaSetName string, options metav1.GetOptions) (*autoscalingv1.Scale, error)
+	UpdateScale(ctx context.Context, replicaSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error)
 
 	ReplicaSetExpansion
 }
@@ -68,20 +69,20 @@ func newReplicaSets(c *AppsV1Client, namespace string) *replicaSets {
 }
 
 // Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any.
-func (c *replicaSets) Get(name string, options metav1.GetOptions) (result *v1.ReplicaSet, err error) {
+func (c *replicaSets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ReplicaSet, err error) {
 	result = &v1.ReplicaSet{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("replicasets").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of ReplicaSets that match those selectors.
-func (c *replicaSets) List(opts metav1.ListOptions) (result *v1.ReplicaSetList, err error) {
+func (c *replicaSets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ReplicaSetList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -92,13 +93,13 @@ func (c *replicaSets) List(opts metav1.ListOptions) (result *v1.ReplicaSetList,
 		Resource("replicasets").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested replicaSets.
-func (c *replicaSets) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+func (c *replicaSets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -109,93 +110,96 @@ func (c *replicaSets) Watch(opts metav1.ListOptions) (watch.Interface, error) {
 		Resource("replicasets").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a replicaSet and creates it.  Returns the server's representation of the replicaSet, and an error, if there is any.
-func (c *replicaSets) Create(replicaSet *v1.ReplicaSet) (result *v1.ReplicaSet, err error) {
+func (c *replicaSets) Create(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.CreateOptions) (result *v1.ReplicaSet, err error) {
 	result = &v1.ReplicaSet{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("replicasets").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(replicaSet).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any.
-func (c *replicaSets) Update(replicaSet *v1.ReplicaSet) (result *v1.ReplicaSet, err error) {
+func (c *replicaSets) Update(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (result *v1.ReplicaSet, err error) {
 	result = &v1.ReplicaSet{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("replicasets").
 		Name(replicaSet.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(replicaSet).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // UpdateStatus was generated because the type contains a Status member.
 // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-
-func (c *replicaSets) UpdateStatus(replicaSet *v1.ReplicaSet) (result *v1.ReplicaSet, err error) {
+func (c *replicaSets) UpdateStatus(ctx context.Context, replicaSet *v1.ReplicaSet, opts metav1.UpdateOptions) (result *v1.ReplicaSet, err error) {
 	result = &v1.ReplicaSet{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("replicasets").
 		Name(replicaSet.Name).
 		SubResource("status").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(replicaSet).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the replicaSet and deletes it. Returns an error if one occurs.
-func (c *replicaSets) Delete(name string, options *metav1.DeleteOptions) error {
+func (c *replicaSets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("replicasets").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *replicaSets) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+func (c *replicaSets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("replicasets").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched replicaSet.
-func (c *replicaSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicaSet, err error) {
+func (c *replicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ReplicaSet, err error) {
 	result = &v1.ReplicaSet{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("replicasets").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // GetScale takes name of the replicaSet, and returns the corresponding autoscalingv1.Scale object, and an error if there is any.
-func (c *replicaSets) GetScale(replicaSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) {
+func (c *replicaSets) GetScale(ctx context.Context, replicaSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) {
 	result = &autoscalingv1.Scale{}
 	err = c.client.Get().
 		Namespace(c.ns).
@@ -203,21 +207,22 @@ func (c *replicaSets) GetScale(replicaSetName string, options metav1.GetOptions)
 		Name(replicaSetName).
 		SubResource("scale").
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any.
-func (c *replicaSets) UpdateScale(replicaSetName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) {
+func (c *replicaSets) UpdateScale(ctx context.Context, replicaSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) {
 	result = &autoscalingv1.Scale{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("replicasets").
 		Name(replicaSetName).
 		SubResource("scale").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(scale).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go
index c12c470b..33a9f535 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1
 
 import (
+	"context"
 	"time"
 
 	v1 "k8s.io/api/apps/v1"
@@ -38,17 +39,17 @@ type StatefulSetsGetter interface {
 
 // StatefulSetInterface has methods to work with StatefulSet resources.
 type StatefulSetInterface interface {
-	Create(*v1.StatefulSet) (*v1.StatefulSet, error)
-	Update(*v1.StatefulSet) (*v1.StatefulSet, error)
-	UpdateStatus(*v1.StatefulSet) (*v1.StatefulSet, error)
-	Delete(name string, options *metav1.DeleteOptions) error
-	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
-	Get(name string, options metav1.GetOptions) (*v1.StatefulSet, error)
-	List(opts metav1.ListOptions) (*v1.StatefulSetList, error)
-	Watch(opts metav1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.StatefulSet, err error)
-	GetScale(statefulSetName string, options metav1.GetOptions) (*autoscalingv1.Scale, error)
-	UpdateScale(statefulSetName string, scale *autoscalingv1.Scale) (*autoscalingv1.Scale, error)
+	Create(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.CreateOptions) (*v1.StatefulSet, error)
+	Update(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (*v1.StatefulSet, error)
+	UpdateStatus(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (*v1.StatefulSet, error)
+	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.StatefulSet, error)
+	List(ctx context.Context, opts metav1.ListOptions) (*v1.StatefulSetList, error)
+	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.StatefulSet, err error)
+	GetScale(ctx context.Context, statefulSetName string, options metav1.GetOptions) (*autoscalingv1.Scale, error)
+	UpdateScale(ctx context.Context, statefulSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error)
 
 	StatefulSetExpansion
 }
@@ -68,20 +69,20 @@ func newStatefulSets(c *AppsV1Client, namespace string) *statefulSets {
 }
 
 // Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any.
-func (c *statefulSets) Get(name string, options metav1.GetOptions) (result *v1.StatefulSet, err error) {
+func (c *statefulSets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.StatefulSet, err error) {
 	result = &v1.StatefulSet{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("statefulsets").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of StatefulSets that match those selectors.
-func (c *statefulSets) List(opts metav1.ListOptions) (result *v1.StatefulSetList, err error) {
+func (c *statefulSets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.StatefulSetList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -92,13 +93,13 @@ func (c *statefulSets) List(opts metav1.ListOptions) (result *v1.StatefulSetList
 		Resource("statefulsets").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested statefulSets.
-func (c *statefulSets) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+func (c *statefulSets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -109,93 +110,96 @@ func (c *statefulSets) Watch(opts metav1.ListOptions) (watch.Interface, error) {
 		Resource("statefulsets").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a statefulSet and creates it.  Returns the server's representation of the statefulSet, and an error, if there is any.
-func (c *statefulSets) Create(statefulSet *v1.StatefulSet) (result *v1.StatefulSet, err error) {
+func (c *statefulSets) Create(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.CreateOptions) (result *v1.StatefulSet, err error) {
 	result = &v1.StatefulSet{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("statefulsets").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(statefulSet).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any.
-func (c *statefulSets) Update(statefulSet *v1.StatefulSet) (result *v1.StatefulSet, err error) {
+func (c *statefulSets) Update(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (result *v1.StatefulSet, err error) {
 	result = &v1.StatefulSet{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("statefulsets").
 		Name(statefulSet.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(statefulSet).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // UpdateStatus was generated because the type contains a Status member.
 // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-
-func (c *statefulSets) UpdateStatus(statefulSet *v1.StatefulSet) (result *v1.StatefulSet, err error) {
+func (c *statefulSets) UpdateStatus(ctx context.Context, statefulSet *v1.StatefulSet, opts metav1.UpdateOptions) (result *v1.StatefulSet, err error) {
 	result = &v1.StatefulSet{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("statefulsets").
 		Name(statefulSet.Name).
 		SubResource("status").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(statefulSet).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the statefulSet and deletes it. Returns an error if one occurs.
-func (c *statefulSets) Delete(name string, options *metav1.DeleteOptions) error {
+func (c *statefulSets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("statefulsets").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *statefulSets) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+func (c *statefulSets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("statefulsets").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched statefulSet.
-func (c *statefulSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.StatefulSet, err error) {
+func (c *statefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.StatefulSet, err error) {
 	result = &v1.StatefulSet{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("statefulsets").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // GetScale takes name of the statefulSet, and returns the corresponding autoscalingv1.Scale object, and an error if there is any.
-func (c *statefulSets) GetScale(statefulSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) {
+func (c *statefulSets) GetScale(ctx context.Context, statefulSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) {
 	result = &autoscalingv1.Scale{}
 	err = c.client.Get().
 		Namespace(c.ns).
@@ -203,21 +207,22 @@ func (c *statefulSets) GetScale(statefulSetName string, options metav1.GetOption
 		Name(statefulSetName).
 		SubResource("scale").
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any.
-func (c *statefulSets) UpdateScale(statefulSetName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) {
+func (c *statefulSets) UpdateScale(ctx context.Context, statefulSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) {
 	result = &autoscalingv1.Scale{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("statefulsets").
 		Name(statefulSetName).
 		SubResource("scale").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(scale).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go
index 45ddb915..e247e07d 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1beta1
 
 import (
+	"context"
 	"time"
 
 	v1beta1 "k8s.io/api/apps/v1beta1"
@@ -37,14 +38,14 @@ type ControllerRevisionsGetter interface {
 
 // ControllerRevisionInterface has methods to work with ControllerRevision resources.
 type ControllerRevisionInterface interface {
-	Create(*v1beta1.ControllerRevision) (*v1beta1.ControllerRevision, error)
-	Update(*v1beta1.ControllerRevision) (*v1beta1.ControllerRevision, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1beta1.ControllerRevision, error)
-	List(opts v1.ListOptions) (*v1beta1.ControllerRevisionList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ControllerRevision, err error)
+	Create(ctx context.Context, controllerRevision *v1beta1.ControllerRevision, opts v1.CreateOptions) (*v1beta1.ControllerRevision, error)
+	Update(ctx context.Context, controllerRevision *v1beta1.ControllerRevision, opts v1.UpdateOptions) (*v1beta1.ControllerRevision, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ControllerRevision, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ControllerRevisionList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ControllerRevision, err error)
 	ControllerRevisionExpansion
 }
 
@@ -63,20 +64,20 @@ func newControllerRevisions(c *AppsV1beta1Client, namespace string) *controllerR
 }
 
 // Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any.
-func (c *controllerRevisions) Get(name string, options v1.GetOptions) (result *v1beta1.ControllerRevision, err error) {
+func (c *controllerRevisions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ControllerRevision, err error) {
 	result = &v1beta1.ControllerRevision{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("controllerrevisions").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors.
-func (c *controllerRevisions) List(opts v1.ListOptions) (result *v1beta1.ControllerRevisionList, err error) {
+func (c *controllerRevisions) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ControllerRevisionList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -87,13 +88,13 @@ func (c *controllerRevisions) List(opts v1.ListOptions) (result *v1beta1.Control
 		Resource("controllerrevisions").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested controllerRevisions.
-func (c *controllerRevisions) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *controllerRevisions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -104,71 +105,74 @@ func (c *controllerRevisions) Watch(opts v1.ListOptions) (watch.Interface, error
 		Resource("controllerrevisions").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a controllerRevision and creates it.  Returns the server's representation of the controllerRevision, and an error, if there is any.
-func (c *controllerRevisions) Create(controllerRevision *v1beta1.ControllerRevision) (result *v1beta1.ControllerRevision, err error) {
+func (c *controllerRevisions) Create(ctx context.Context, controllerRevision *v1beta1.ControllerRevision, opts v1.CreateOptions) (result *v1beta1.ControllerRevision, err error) {
 	result = &v1beta1.ControllerRevision{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("controllerrevisions").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(controllerRevision).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any.
-func (c *controllerRevisions) Update(controllerRevision *v1beta1.ControllerRevision) (result *v1beta1.ControllerRevision, err error) {
+func (c *controllerRevisions) Update(ctx context.Context, controllerRevision *v1beta1.ControllerRevision, opts v1.UpdateOptions) (result *v1beta1.ControllerRevision, err error) {
 	result = &v1beta1.ControllerRevision{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("controllerrevisions").
 		Name(controllerRevision.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(controllerRevision).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs.
-func (c *controllerRevisions) Delete(name string, options *v1.DeleteOptions) error {
+func (c *controllerRevisions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("controllerrevisions").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *controllerRevisions) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *controllerRevisions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("controllerrevisions").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched controllerRevision.
-func (c *controllerRevisions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ControllerRevision, err error) {
+func (c *controllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ControllerRevision, err error) {
 	result = &v1beta1.ControllerRevision{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("controllerrevisions").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go
index 05fdcb7a..dc0dad04 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1beta1
 
 import (
+	"context"
 	"time"
 
 	v1beta1 "k8s.io/api/apps/v1beta1"
@@ -37,15 +38,15 @@ type DeploymentsGetter interface {
 
 // DeploymentInterface has methods to work with Deployment resources.
 type DeploymentInterface interface {
-	Create(*v1beta1.Deployment) (*v1beta1.Deployment, error)
-	Update(*v1beta1.Deployment) (*v1beta1.Deployment, error)
-	UpdateStatus(*v1beta1.Deployment) (*v1beta1.Deployment, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1beta1.Deployment, error)
-	List(opts v1.ListOptions) (*v1beta1.DeploymentList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error)
+	Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (*v1beta1.Deployment, error)
+	Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error)
+	UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Deployment, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1beta1.DeploymentList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error)
 	DeploymentExpansion
 }
 
@@ -64,20 +65,20 @@ func newDeployments(c *AppsV1beta1Client, namespace string) *deployments {
 }
 
 // Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any.
-func (c *deployments) Get(name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) {
+func (c *deployments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) {
 	result = &v1beta1.Deployment{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("deployments").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of Deployments that match those selectors.
-func (c *deployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) {
+func (c *deployments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -88,13 +89,13 @@ func (c *deployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList,
 		Resource("deployments").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested deployments.
-func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *deployments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -105,87 +106,90 @@ func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) {
 		Resource("deployments").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a deployment and creates it.  Returns the server's representation of the deployment, and an error, if there is any.
-func (c *deployments) Create(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) {
+func (c *deployments) Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (result *v1beta1.Deployment, err error) {
 	result = &v1beta1.Deployment{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("deployments").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(deployment).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any.
-func (c *deployments) Update(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) {
+func (c *deployments) Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) {
 	result = &v1beta1.Deployment{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("deployments").
 		Name(deployment.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(deployment).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // UpdateStatus was generated because the type contains a Status member.
 // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-
-func (c *deployments) UpdateStatus(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) {
+func (c *deployments) UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) {
 	result = &v1beta1.Deployment{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("deployments").
 		Name(deployment.Name).
 		SubResource("status").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(deployment).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the deployment and deletes it. Returns an error if one occurs.
-func (c *deployments) Delete(name string, options *v1.DeleteOptions) error {
+func (c *deployments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("deployments").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *deployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *deployments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("deployments").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched deployment.
-func (c *deployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) {
+func (c *deployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error) {
 	result = &v1beta1.Deployment{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("deployments").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
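
The updated DeploymentInterface above shows the general shape of the regenerated clients: every verb now takes a context.Context first and an explicit options struct last. A minimal caller-side sketch, assuming a pre-built kubernetes.Interface and hypothetical namespace/name values:

package example

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// showDeployment fetches and re-submits a Deployment through the context-aware
// signatures; the namespace and name arguments are placeholders.
func showDeployment(client kubernetes.Interface, namespace, name string) error {
	// The context bounds the whole request; cancelling it aborts the HTTP call.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	dep, err := client.AppsV1beta1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	fmt.Println("resource version:", dep.ResourceVersion)

	// Update now also takes an explicit (usually empty) UpdateOptions value.
	_, err = client.AppsV1beta1().Deployments(namespace).Update(ctx, dep, metav1.UpdateOptions{})
	return err
}
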
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go
index c4b35b42..32ec548a 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1beta1
 
 import (
+	"context"
 	"time"
 
 	v1beta1 "k8s.io/api/apps/v1beta1"
@@ -37,15 +38,15 @@ type StatefulSetsGetter interface {
 
 // StatefulSetInterface has methods to work with StatefulSet resources.
 type StatefulSetInterface interface {
-	Create(*v1beta1.StatefulSet) (*v1beta1.StatefulSet, error)
-	Update(*v1beta1.StatefulSet) (*v1beta1.StatefulSet, error)
-	UpdateStatus(*v1beta1.StatefulSet) (*v1beta1.StatefulSet, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1beta1.StatefulSet, error)
-	List(opts v1.ListOptions) (*v1beta1.StatefulSetList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StatefulSet, err error)
+	Create(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.CreateOptions) (*v1beta1.StatefulSet, error)
+	Update(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (*v1beta1.StatefulSet, error)
+	UpdateStatus(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (*v1beta1.StatefulSet, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.StatefulSet, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1beta1.StatefulSetList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StatefulSet, err error)
 	StatefulSetExpansion
 }
 
@@ -64,20 +65,20 @@ func newStatefulSets(c *AppsV1beta1Client, namespace string) *statefulSets {
 }
 
 // Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any.
-func (c *statefulSets) Get(name string, options v1.GetOptions) (result *v1beta1.StatefulSet, err error) {
+func (c *statefulSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.StatefulSet, err error) {
 	result = &v1beta1.StatefulSet{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("statefulsets").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of StatefulSets that match those selectors.
-func (c *statefulSets) List(opts v1.ListOptions) (result *v1beta1.StatefulSetList, err error) {
+func (c *statefulSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.StatefulSetList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -88,13 +89,13 @@ func (c *statefulSets) List(opts v1.ListOptions) (result *v1beta1.StatefulSetLis
 		Resource("statefulsets").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested statefulSets.
-func (c *statefulSets) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *statefulSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -105,87 +106,90 @@ func (c *statefulSets) Watch(opts v1.ListOptions) (watch.Interface, error) {
 		Resource("statefulsets").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a statefulSet and creates it.  Returns the server's representation of the statefulSet, and an error, if there is any.
-func (c *statefulSets) Create(statefulSet *v1beta1.StatefulSet) (result *v1beta1.StatefulSet, err error) {
+func (c *statefulSets) Create(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.CreateOptions) (result *v1beta1.StatefulSet, err error) {
 	result = &v1beta1.StatefulSet{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("statefulsets").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(statefulSet).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any.
-func (c *statefulSets) Update(statefulSet *v1beta1.StatefulSet) (result *v1beta1.StatefulSet, err error) {
+func (c *statefulSets) Update(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (result *v1beta1.StatefulSet, err error) {
 	result = &v1beta1.StatefulSet{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("statefulsets").
 		Name(statefulSet.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(statefulSet).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // UpdateStatus was generated because the type contains a Status member.
 // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-
-func (c *statefulSets) UpdateStatus(statefulSet *v1beta1.StatefulSet) (result *v1beta1.StatefulSet, err error) {
+func (c *statefulSets) UpdateStatus(ctx context.Context, statefulSet *v1beta1.StatefulSet, opts v1.UpdateOptions) (result *v1beta1.StatefulSet, err error) {
 	result = &v1beta1.StatefulSet{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("statefulsets").
 		Name(statefulSet.Name).
 		SubResource("status").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(statefulSet).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the statefulSet and deletes it. Returns an error if one occurs.
-func (c *statefulSets) Delete(name string, options *v1.DeleteOptions) error {
+func (c *statefulSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("statefulsets").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *statefulSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *statefulSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("statefulsets").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched statefulSet.
-func (c *statefulSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StatefulSet, err error) {
+func (c *statefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StatefulSet, err error) {
 	result = &v1beta1.StatefulSet{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("statefulsets").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go
index e1d60251..e8de2d0f 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1beta2
 
 import (
+	"context"
 	"time"
 
 	v1beta2 "k8s.io/api/apps/v1beta2"
@@ -37,14 +38,14 @@ type ControllerRevisionsGetter interface {
 
 // ControllerRevisionInterface has methods to work with ControllerRevision resources.
 type ControllerRevisionInterface interface {
-	Create(*v1beta2.ControllerRevision) (*v1beta2.ControllerRevision, error)
-	Update(*v1beta2.ControllerRevision) (*v1beta2.ControllerRevision, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1beta2.ControllerRevision, error)
-	List(opts v1.ListOptions) (*v1beta2.ControllerRevisionList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.ControllerRevision, err error)
+	Create(ctx context.Context, controllerRevision *v1beta2.ControllerRevision, opts v1.CreateOptions) (*v1beta2.ControllerRevision, error)
+	Update(ctx context.Context, controllerRevision *v1beta2.ControllerRevision, opts v1.UpdateOptions) (*v1beta2.ControllerRevision, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta2.ControllerRevision, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1beta2.ControllerRevisionList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ControllerRevision, err error)
 	ControllerRevisionExpansion
 }
 
@@ -63,20 +64,20 @@ func newControllerRevisions(c *AppsV1beta2Client, namespace string) *controllerR
 }
 
 // Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any.
-func (c *controllerRevisions) Get(name string, options v1.GetOptions) (result *v1beta2.ControllerRevision, err error) {
+func (c *controllerRevisions) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.ControllerRevision, err error) {
 	result = &v1beta2.ControllerRevision{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("controllerrevisions").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors.
-func (c *controllerRevisions) List(opts v1.ListOptions) (result *v1beta2.ControllerRevisionList, err error) {
+func (c *controllerRevisions) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.ControllerRevisionList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -87,13 +88,13 @@ func (c *controllerRevisions) List(opts v1.ListOptions) (result *v1beta2.Control
 		Resource("controllerrevisions").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested controllerRevisions.
-func (c *controllerRevisions) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *controllerRevisions) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -104,71 +105,74 @@ func (c *controllerRevisions) Watch(opts v1.ListOptions) (watch.Interface, error
 		Resource("controllerrevisions").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a controllerRevision and creates it.  Returns the server's representation of the controllerRevision, and an error, if there is any.
-func (c *controllerRevisions) Create(controllerRevision *v1beta2.ControllerRevision) (result *v1beta2.ControllerRevision, err error) {
+func (c *controllerRevisions) Create(ctx context.Context, controllerRevision *v1beta2.ControllerRevision, opts v1.CreateOptions) (result *v1beta2.ControllerRevision, err error) {
 	result = &v1beta2.ControllerRevision{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("controllerrevisions").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(controllerRevision).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any.
-func (c *controllerRevisions) Update(controllerRevision *v1beta2.ControllerRevision) (result *v1beta2.ControllerRevision, err error) {
+func (c *controllerRevisions) Update(ctx context.Context, controllerRevision *v1beta2.ControllerRevision, opts v1.UpdateOptions) (result *v1beta2.ControllerRevision, err error) {
 	result = &v1beta2.ControllerRevision{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("controllerrevisions").
 		Name(controllerRevision.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(controllerRevision).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs.
-func (c *controllerRevisions) Delete(name string, options *v1.DeleteOptions) error {
+func (c *controllerRevisions) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("controllerrevisions").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *controllerRevisions) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *controllerRevisions) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("controllerrevisions").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched controllerRevision.
-func (c *controllerRevisions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.ControllerRevision, err error) {
+func (c *controllerRevisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ControllerRevision, err error) {
 	result = &v1beta2.ControllerRevision{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("controllerrevisions").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go
index f8b7ac25..6d3a26d3 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1beta2
 
 import (
+	"context"
 	"time"
 
 	v1beta2 "k8s.io/api/apps/v1beta2"
@@ -37,15 +38,15 @@ type DaemonSetsGetter interface {
 
 // DaemonSetInterface has methods to work with DaemonSet resources.
 type DaemonSetInterface interface {
-	Create(*v1beta2.DaemonSet) (*v1beta2.DaemonSet, error)
-	Update(*v1beta2.DaemonSet) (*v1beta2.DaemonSet, error)
-	UpdateStatus(*v1beta2.DaemonSet) (*v1beta2.DaemonSet, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1beta2.DaemonSet, error)
-	List(opts v1.ListOptions) (*v1beta2.DaemonSetList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.DaemonSet, err error)
+	Create(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.CreateOptions) (*v1beta2.DaemonSet, error)
+	Update(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (*v1beta2.DaemonSet, error)
+	UpdateStatus(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (*v1beta2.DaemonSet, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta2.DaemonSet, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1beta2.DaemonSetList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.DaemonSet, err error)
 	DaemonSetExpansion
 }
 
@@ -64,20 +65,20 @@ func newDaemonSets(c *AppsV1beta2Client, namespace string) *daemonSets {
 }
 
 // Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any.
-func (c *daemonSets) Get(name string, options v1.GetOptions) (result *v1beta2.DaemonSet, err error) {
+func (c *daemonSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.DaemonSet, err error) {
 	result = &v1beta2.DaemonSet{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("daemonsets").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of DaemonSets that match those selectors.
-func (c *daemonSets) List(opts v1.ListOptions) (result *v1beta2.DaemonSetList, err error) {
+func (c *daemonSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.DaemonSetList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -88,13 +89,13 @@ func (c *daemonSets) List(opts v1.ListOptions) (result *v1beta2.DaemonSetList, e
 		Resource("daemonsets").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested daemonSets.
-func (c *daemonSets) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *daemonSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -105,87 +106,90 @@ func (c *daemonSets) Watch(opts v1.ListOptions) (watch.Interface, error) {
 		Resource("daemonsets").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a daemonSet and creates it.  Returns the server's representation of the daemonSet, and an error, if there is any.
-func (c *daemonSets) Create(daemonSet *v1beta2.DaemonSet) (result *v1beta2.DaemonSet, err error) {
+func (c *daemonSets) Create(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.CreateOptions) (result *v1beta2.DaemonSet, err error) {
 	result = &v1beta2.DaemonSet{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("daemonsets").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(daemonSet).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any.
-func (c *daemonSets) Update(daemonSet *v1beta2.DaemonSet) (result *v1beta2.DaemonSet, err error) {
+func (c *daemonSets) Update(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (result *v1beta2.DaemonSet, err error) {
 	result = &v1beta2.DaemonSet{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("daemonsets").
 		Name(daemonSet.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(daemonSet).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // UpdateStatus was generated because the type contains a Status member.
 // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-
-func (c *daemonSets) UpdateStatus(daemonSet *v1beta2.DaemonSet) (result *v1beta2.DaemonSet, err error) {
+func (c *daemonSets) UpdateStatus(ctx context.Context, daemonSet *v1beta2.DaemonSet, opts v1.UpdateOptions) (result *v1beta2.DaemonSet, err error) {
 	result = &v1beta2.DaemonSet{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("daemonsets").
 		Name(daemonSet.Name).
 		SubResource("status").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(daemonSet).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the daemonSet and deletes it. Returns an error if one occurs.
-func (c *daemonSets) Delete(name string, options *v1.DeleteOptions) error {
+func (c *daemonSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("daemonsets").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *daemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *daemonSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("daemonsets").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched daemonSet.
-func (c *daemonSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.DaemonSet, err error) {
+func (c *daemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.DaemonSet, err error) {
 	result = &v1beta2.DaemonSet{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("daemonsets").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
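
Watch takes the context as well, so cancellation propagates to the underlying stream and the event channel closes. A sketch of a bounded watch against the v1beta2 DaemonSet client shown above (client and namespace are hypothetical):

package example

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// watchDaemonSets watches DaemonSets until the context times out or is cancelled.
func watchDaemonSets(client kubernetes.Interface, namespace string) error {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()

	w, err := client.AppsV1beta2().DaemonSets(namespace).Watch(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	defer w.Stop()

	// The range ends once the watch stream is closed (timeout, cancel, or Stop).
	for event := range w.ResultChan() {
		fmt.Printf("%s: %T\n", event.Type, event.Object)
	}
	return nil
}
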
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go
index 510250b0..2cdb539e 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1beta2
 
 import (
+	"context"
 	"time"
 
 	v1beta2 "k8s.io/api/apps/v1beta2"
@@ -37,15 +38,15 @@ type DeploymentsGetter interface {
 
 // DeploymentInterface has methods to work with Deployment resources.
 type DeploymentInterface interface {
-	Create(*v1beta2.Deployment) (*v1beta2.Deployment, error)
-	Update(*v1beta2.Deployment) (*v1beta2.Deployment, error)
-	UpdateStatus(*v1beta2.Deployment) (*v1beta2.Deployment, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1beta2.Deployment, error)
-	List(opts v1.ListOptions) (*v1beta2.DeploymentList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.Deployment, err error)
+	Create(ctx context.Context, deployment *v1beta2.Deployment, opts v1.CreateOptions) (*v1beta2.Deployment, error)
+	Update(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (*v1beta2.Deployment, error)
+	UpdateStatus(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (*v1beta2.Deployment, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta2.Deployment, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1beta2.DeploymentList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.Deployment, err error)
 	DeploymentExpansion
 }
 
@@ -64,20 +65,20 @@ func newDeployments(c *AppsV1beta2Client, namespace string) *deployments {
 }
 
 // Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any.
-func (c *deployments) Get(name string, options v1.GetOptions) (result *v1beta2.Deployment, err error) {
+func (c *deployments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.Deployment, err error) {
 	result = &v1beta2.Deployment{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("deployments").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of Deployments that match those selectors.
-func (c *deployments) List(opts v1.ListOptions) (result *v1beta2.DeploymentList, err error) {
+func (c *deployments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.DeploymentList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -88,13 +89,13 @@ func (c *deployments) List(opts v1.ListOptions) (result *v1beta2.DeploymentList,
 		Resource("deployments").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested deployments.
-func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *deployments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -105,87 +106,90 @@ func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) {
 		Resource("deployments").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a deployment and creates it.  Returns the server's representation of the deployment, and an error, if there is any.
-func (c *deployments) Create(deployment *v1beta2.Deployment) (result *v1beta2.Deployment, err error) {
+func (c *deployments) Create(ctx context.Context, deployment *v1beta2.Deployment, opts v1.CreateOptions) (result *v1beta2.Deployment, err error) {
 	result = &v1beta2.Deployment{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("deployments").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(deployment).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any.
-func (c *deployments) Update(deployment *v1beta2.Deployment) (result *v1beta2.Deployment, err error) {
+func (c *deployments) Update(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (result *v1beta2.Deployment, err error) {
 	result = &v1beta2.Deployment{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("deployments").
 		Name(deployment.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(deployment).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // UpdateStatus was generated because the type contains a Status member.
 // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-
-func (c *deployments) UpdateStatus(deployment *v1beta2.Deployment) (result *v1beta2.Deployment, err error) {
+func (c *deployments) UpdateStatus(ctx context.Context, deployment *v1beta2.Deployment, opts v1.UpdateOptions) (result *v1beta2.Deployment, err error) {
 	result = &v1beta2.Deployment{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("deployments").
 		Name(deployment.Name).
 		SubResource("status").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(deployment).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the deployment and deletes it. Returns an error if one occurs.
-func (c *deployments) Delete(name string, options *v1.DeleteOptions) error {
+func (c *deployments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("deployments").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *deployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *deployments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("deployments").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched deployment.
-func (c *deployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.Deployment, err error) {
+func (c *deployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.Deployment, err error) {
 	result = &v1beta2.Deployment{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("deployments").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go
index 7b738774..d7365beb 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1beta2
 
 import (
+	"context"
 	"time"
 
 	v1beta2 "k8s.io/api/apps/v1beta2"
@@ -37,15 +38,15 @@ type ReplicaSetsGetter interface {
 
 // ReplicaSetInterface has methods to work with ReplicaSet resources.
 type ReplicaSetInterface interface {
-	Create(*v1beta2.ReplicaSet) (*v1beta2.ReplicaSet, error)
-	Update(*v1beta2.ReplicaSet) (*v1beta2.ReplicaSet, error)
-	UpdateStatus(*v1beta2.ReplicaSet) (*v1beta2.ReplicaSet, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1beta2.ReplicaSet, error)
-	List(opts v1.ListOptions) (*v1beta2.ReplicaSetList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.ReplicaSet, err error)
+	Create(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.CreateOptions) (*v1beta2.ReplicaSet, error)
+	Update(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (*v1beta2.ReplicaSet, error)
+	UpdateStatus(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (*v1beta2.ReplicaSet, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta2.ReplicaSet, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1beta2.ReplicaSetList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ReplicaSet, err error)
 	ReplicaSetExpansion
 }
 
@@ -64,20 +65,20 @@ func newReplicaSets(c *AppsV1beta2Client, namespace string) *replicaSets {
 }
 
 // Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any.
-func (c *replicaSets) Get(name string, options v1.GetOptions) (result *v1beta2.ReplicaSet, err error) {
+func (c *replicaSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.ReplicaSet, err error) {
 	result = &v1beta2.ReplicaSet{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("replicasets").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of ReplicaSets that match those selectors.
-func (c *replicaSets) List(opts v1.ListOptions) (result *v1beta2.ReplicaSetList, err error) {
+func (c *replicaSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.ReplicaSetList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -88,13 +89,13 @@ func (c *replicaSets) List(opts v1.ListOptions) (result *v1beta2.ReplicaSetList,
 		Resource("replicasets").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested replicaSets.
-func (c *replicaSets) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *replicaSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -105,87 +106,90 @@ func (c *replicaSets) Watch(opts v1.ListOptions) (watch.Interface, error) {
 		Resource("replicasets").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a replicaSet and creates it.  Returns the server's representation of the replicaSet, and an error, if there is any.
-func (c *replicaSets) Create(replicaSet *v1beta2.ReplicaSet) (result *v1beta2.ReplicaSet, err error) {
+func (c *replicaSets) Create(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.CreateOptions) (result *v1beta2.ReplicaSet, err error) {
 	result = &v1beta2.ReplicaSet{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("replicasets").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(replicaSet).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any.
-func (c *replicaSets) Update(replicaSet *v1beta2.ReplicaSet) (result *v1beta2.ReplicaSet, err error) {
+func (c *replicaSets) Update(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (result *v1beta2.ReplicaSet, err error) {
 	result = &v1beta2.ReplicaSet{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("replicasets").
 		Name(replicaSet.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(replicaSet).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // UpdateStatus was generated because the type contains a Status member.
 // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-
-func (c *replicaSets) UpdateStatus(replicaSet *v1beta2.ReplicaSet) (result *v1beta2.ReplicaSet, err error) {
+func (c *replicaSets) UpdateStatus(ctx context.Context, replicaSet *v1beta2.ReplicaSet, opts v1.UpdateOptions) (result *v1beta2.ReplicaSet, err error) {
 	result = &v1beta2.ReplicaSet{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("replicasets").
 		Name(replicaSet.Name).
 		SubResource("status").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(replicaSet).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the replicaSet and deletes it. Returns an error if one occurs.
-func (c *replicaSets) Delete(name string, options *v1.DeleteOptions) error {
+func (c *replicaSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("replicasets").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *replicaSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *replicaSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("replicasets").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched replicaSet.
-func (c *replicaSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.ReplicaSet, err error) {
+func (c *replicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.ReplicaSet, err error) {
 	result = &v1beta2.ReplicaSet{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("replicasets").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go
index de7c3db8..74583169 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1beta2
 
 import (
+	"context"
 	"time"
 
 	v1beta2 "k8s.io/api/apps/v1beta2"
@@ -37,17 +38,17 @@ type StatefulSetsGetter interface {
 
 // StatefulSetInterface has methods to work with StatefulSet resources.
 type StatefulSetInterface interface {
-	Create(*v1beta2.StatefulSet) (*v1beta2.StatefulSet, error)
-	Update(*v1beta2.StatefulSet) (*v1beta2.StatefulSet, error)
-	UpdateStatus(*v1beta2.StatefulSet) (*v1beta2.StatefulSet, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1beta2.StatefulSet, error)
-	List(opts v1.ListOptions) (*v1beta2.StatefulSetList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.StatefulSet, err error)
-	GetScale(statefulSetName string, options v1.GetOptions) (*v1beta2.Scale, error)
-	UpdateScale(statefulSetName string, scale *v1beta2.Scale) (*v1beta2.Scale, error)
+	Create(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.CreateOptions) (*v1beta2.StatefulSet, error)
+	Update(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (*v1beta2.StatefulSet, error)
+	UpdateStatus(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (*v1beta2.StatefulSet, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta2.StatefulSet, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1beta2.StatefulSetList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.StatefulSet, err error)
+	GetScale(ctx context.Context, statefulSetName string, options v1.GetOptions) (*v1beta2.Scale, error)
+	UpdateScale(ctx context.Context, statefulSetName string, scale *v1beta2.Scale, opts v1.UpdateOptions) (*v1beta2.Scale, error)
 
 	StatefulSetExpansion
 }
@@ -67,20 +68,20 @@ func newStatefulSets(c *AppsV1beta2Client, namespace string) *statefulSets {
 }
 
 // Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any.
-func (c *statefulSets) Get(name string, options v1.GetOptions) (result *v1beta2.StatefulSet, err error) {
+func (c *statefulSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.StatefulSet, err error) {
 	result = &v1beta2.StatefulSet{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("statefulsets").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of StatefulSets that match those selectors.
-func (c *statefulSets) List(opts v1.ListOptions) (result *v1beta2.StatefulSetList, err error) {
+func (c *statefulSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.StatefulSetList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -91,13 +92,13 @@ func (c *statefulSets) List(opts v1.ListOptions) (result *v1beta2.StatefulSetLis
 		Resource("statefulsets").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested statefulSets.
-func (c *statefulSets) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *statefulSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -108,93 +109,96 @@ func (c *statefulSets) Watch(opts v1.ListOptions) (watch.Interface, error) {
 		Resource("statefulsets").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a statefulSet and creates it.  Returns the server's representation of the statefulSet, and an error, if there is any.
-func (c *statefulSets) Create(statefulSet *v1beta2.StatefulSet) (result *v1beta2.StatefulSet, err error) {
+func (c *statefulSets) Create(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.CreateOptions) (result *v1beta2.StatefulSet, err error) {
 	result = &v1beta2.StatefulSet{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("statefulsets").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(statefulSet).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any.
-func (c *statefulSets) Update(statefulSet *v1beta2.StatefulSet) (result *v1beta2.StatefulSet, err error) {
+func (c *statefulSets) Update(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (result *v1beta2.StatefulSet, err error) {
 	result = &v1beta2.StatefulSet{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("statefulsets").
 		Name(statefulSet.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(statefulSet).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // UpdateStatus was generated because the type contains a Status member.
 // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-
-func (c *statefulSets) UpdateStatus(statefulSet *v1beta2.StatefulSet) (result *v1beta2.StatefulSet, err error) {
+func (c *statefulSets) UpdateStatus(ctx context.Context, statefulSet *v1beta2.StatefulSet, opts v1.UpdateOptions) (result *v1beta2.StatefulSet, err error) {
 	result = &v1beta2.StatefulSet{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("statefulsets").
 		Name(statefulSet.Name).
 		SubResource("status").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(statefulSet).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the statefulSet and deletes it. Returns an error if one occurs.
-func (c *statefulSets) Delete(name string, options *v1.DeleteOptions) error {
+func (c *statefulSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("statefulsets").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *statefulSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *statefulSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("statefulsets").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched statefulSet.
-func (c *statefulSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.StatefulSet, err error) {
+func (c *statefulSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.StatefulSet, err error) {
 	result = &v1beta2.StatefulSet{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("statefulsets").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // GetScale takes name of the statefulSet, and returns the corresponding v1beta2.Scale object, and an error if there is any.
-func (c *statefulSets) GetScale(statefulSetName string, options v1.GetOptions) (result *v1beta2.Scale, err error) {
+func (c *statefulSets) GetScale(ctx context.Context, statefulSetName string, options v1.GetOptions) (result *v1beta2.Scale, err error) {
 	result = &v1beta2.Scale{}
 	err = c.client.Get().
 		Namespace(c.ns).
@@ -202,21 +206,22 @@ func (c *statefulSets) GetScale(statefulSetName string, options v1.GetOptions) (
 		Name(statefulSetName).
 		SubResource("scale").
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any.
-func (c *statefulSets) UpdateScale(statefulSetName string, scale *v1beta2.Scale) (result *v1beta2.Scale, err error) {
+func (c *statefulSets) UpdateScale(ctx context.Context, statefulSetName string, scale *v1beta2.Scale, opts v1.UpdateOptions) (result *v1beta2.Scale, err error) {
 	result = &v1beta2.Scale{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("statefulsets").
 		Name(statefulSetName).
 		SubResource("scale").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(scale).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
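
The hunks above capture the shape of this client-go bump: every typed client method now takes a context.Context as its first argument plus an explicit options struct, and request execution moves from Do()/Watch() to Do(ctx)/Watch(ctx). A minimal caller-side sketch of the new StatefulSets surface — not part of the patch, and assuming a clientset built from the default kubeconfig with an illustrative namespace and object name — might look like:

    package main

    import (
    	"context"
    	"fmt"
    	"time"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    	"k8s.io/client-go/tools/clientcmd"
    )

    func main() {
    	// Assumed: a kubeconfig at the default location; illustrative only.
    	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
    	if err != nil {
    		panic(err)
    	}
    	cs := kubernetes.NewForConfigOrDie(cfg)

    	// Every call now threads a context and an explicit options value.
    	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    	defer cancel()

    	sts, err := cs.AppsV1beta2().StatefulSets("default").Get(ctx, "web", metav1.GetOptions{})
    	if err != nil {
    		panic(err)
    	}

    	replicas := int32(3)
    	sts.Spec.Replicas = &replicas
    	if _, err := cs.AppsV1beta2().StatefulSets("default").Update(ctx, sts, metav1.UpdateOptions{}); err != nil {
    		panic(err)
    	}
    	fmt.Println("updated", sts.Name)
    }

Call sites that previously omitted options entirely now pass zero-value structs such as metav1.GetOptions{} or metav1.UpdateOptions{}.
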
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditsink.go b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditsink.go
index 414d4800..ea748c66 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditsink.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditsink.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1alpha1
 
 import (
+	"context"
 	"time"
 
 	v1alpha1 "k8s.io/api/auditregistration/v1alpha1"
@@ -37,14 +38,14 @@ type AuditSinksGetter interface {
 
 // AuditSinkInterface has methods to work with AuditSink resources.
 type AuditSinkInterface interface {
-	Create(*v1alpha1.AuditSink) (*v1alpha1.AuditSink, error)
-	Update(*v1alpha1.AuditSink) (*v1alpha1.AuditSink, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1alpha1.AuditSink, error)
-	List(opts v1.ListOptions) (*v1alpha1.AuditSinkList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.AuditSink, err error)
+	Create(ctx context.Context, auditSink *v1alpha1.AuditSink, opts v1.CreateOptions) (*v1alpha1.AuditSink, error)
+	Update(ctx context.Context, auditSink *v1alpha1.AuditSink, opts v1.UpdateOptions) (*v1alpha1.AuditSink, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.AuditSink, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.AuditSinkList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.AuditSink, err error)
 	AuditSinkExpansion
 }
 
@@ -61,19 +62,19 @@ func newAuditSinks(c *AuditregistrationV1alpha1Client) *auditSinks {
 }
 
 // Get takes name of the auditSink, and returns the corresponding auditSink object, and an error if there is any.
-func (c *auditSinks) Get(name string, options v1.GetOptions) (result *v1alpha1.AuditSink, err error) {
+func (c *auditSinks) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.AuditSink, err error) {
 	result = &v1alpha1.AuditSink{}
 	err = c.client.Get().
 		Resource("auditsinks").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of AuditSinks that match those selectors.
-func (c *auditSinks) List(opts v1.ListOptions) (result *v1alpha1.AuditSinkList, err error) {
+func (c *auditSinks) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.AuditSinkList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -83,13 +84,13 @@ func (c *auditSinks) List(opts v1.ListOptions) (result *v1alpha1.AuditSinkList,
 		Resource("auditsinks").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested auditSinks.
-func (c *auditSinks) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *auditSinks) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -99,66 +100,69 @@ func (c *auditSinks) Watch(opts v1.ListOptions) (watch.Interface, error) {
 		Resource("auditsinks").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a auditSink and creates it.  Returns the server's representation of the auditSink, and an error, if there is any.
-func (c *auditSinks) Create(auditSink *v1alpha1.AuditSink) (result *v1alpha1.AuditSink, err error) {
+func (c *auditSinks) Create(ctx context.Context, auditSink *v1alpha1.AuditSink, opts v1.CreateOptions) (result *v1alpha1.AuditSink, err error) {
 	result = &v1alpha1.AuditSink{}
 	err = c.client.Post().
 		Resource("auditsinks").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(auditSink).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a auditSink and updates it. Returns the server's representation of the auditSink, and an error, if there is any.
-func (c *auditSinks) Update(auditSink *v1alpha1.AuditSink) (result *v1alpha1.AuditSink, err error) {
+func (c *auditSinks) Update(ctx context.Context, auditSink *v1alpha1.AuditSink, opts v1.UpdateOptions) (result *v1alpha1.AuditSink, err error) {
 	result = &v1alpha1.AuditSink{}
 	err = c.client.Put().
 		Resource("auditsinks").
 		Name(auditSink.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(auditSink).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the auditSink and deletes it. Returns an error if one occurs.
-func (c *auditSinks) Delete(name string, options *v1.DeleteOptions) error {
+func (c *auditSinks) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Resource("auditsinks").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *auditSinks) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *auditSinks) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Resource("auditsinks").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched auditSink.
-func (c *auditSinks) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.AuditSink, err error) {
+func (c *auditSinks) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.AuditSink, err error) {
 	result = &v1alpha1.AuditSink{}
 	err = c.client.Patch(pt).
 		Resource("auditsinks").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
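
One consequence visible in the Delete and DeleteCollection hunks is that DeleteOptions (and the paired ListOptions) are now passed by value, so callers that used to pass nil must pass zero-value structs instead. A hedged sketch against the AuditSink client, where the sink name and label selector are assumptions:

    package example

    import (
    	"context"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    )

    func cleanupAuditSinks(ctx context.Context, cs kubernetes.Interface) error {
    	policy := metav1.DeletePropagationBackground
    	// Single delete: DeleteOptions is a value now, not a pointer that may be nil.
    	if err := cs.AuditregistrationV1alpha1().AuditSinks().Delete(ctx, "example-sink", metav1.DeleteOptions{
    		PropagationPolicy: &policy,
    	}); err != nil {
    		return err
    	}
    	// Collection delete: both option structs are values as well.
    	return cs.AuditregistrationV1alpha1().AuditSinks().DeleteCollection(ctx,
    		metav1.DeleteOptions{},
    		metav1.ListOptions{LabelSelector: "audit=legacy"},
    	)
    }
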
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go
index 177209ec..0413fb2b 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go
@@ -17,3 +17,5 @@ limitations under the License.
 // Code generated by client-gen. DO NOT EDIT.
 
 package v1
+
+type TokenReviewExpansion interface{}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go
index 25a8d6a1..ca7cd47d 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go
@@ -19,6 +19,11 @@ limitations under the License.
 package v1
 
 import (
+	"context"
+
+	v1 "k8s.io/api/authentication/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	scheme "k8s.io/client-go/kubernetes/scheme"
 	rest "k8s.io/client-go/rest"
 )
 
@@ -30,6 +35,7 @@ type TokenReviewsGetter interface {
 
 // TokenReviewInterface has methods to work with TokenReview resources.
 type TokenReviewInterface interface {
+	Create(ctx context.Context, tokenReview *v1.TokenReview, opts metav1.CreateOptions) (*v1.TokenReview, error)
 	TokenReviewExpansion
 }
 
@@ -44,3 +50,15 @@ func newTokenReviews(c *AuthenticationV1Client) *tokenReviews {
 		client: c.RESTClient(),
 	}
 }
+
+// Create takes the representation of a tokenReview and creates it.  Returns the server's representation of the tokenReview, and an error, if there is any.
+func (c *tokenReviews) Create(ctx context.Context, tokenReview *v1.TokenReview, opts metav1.CreateOptions) (result *v1.TokenReview, err error) {
+	result = &v1.TokenReview{}
+	err = c.client.Post().
+		Resource("tokenreviews").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Body(tokenReview).
+		Do(ctx).
+		Into(result)
+	return
+}
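
The generated Create above supersedes the hand-written expansion (removed below) that exposed both Create and CreateContext; callers now pass the context directly. A caller-side sketch, where the bearer token and clientset are assumptions rather than anything in this patch:

    package example

    import (
    	"context"

    	authnv1 "k8s.io/api/authentication/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    )

    // reviewToken asks the API server whether a bearer token is valid.
    func reviewToken(ctx context.Context, cs kubernetes.Interface, token string) (bool, error) {
    	tr := &authnv1.TokenReview{Spec: authnv1.TokenReviewSpec{Token: token}}
    	// CreateContext is gone; the generated Create carries the context itself.
    	res, err := cs.AuthenticationV1().TokenReviews().Create(ctx, tr, metav1.CreateOptions{})
    	if err != nil {
    		return false, err
    	}
    	return res.Status.Authenticated, nil
    }
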
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview_expansion.go
deleted file mode 100644
index 8a21b7c7..00000000
--- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview_expansion.go
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
-Copyright 2017 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1
-
-import (
-	"context"
-
-	authenticationapi "k8s.io/api/authentication/v1"
-)
-
-type TokenReviewExpansion interface {
-	Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error)
-	CreateContext(ctx context.Context, tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error)
-}
-
-func (c *tokenReviews) Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) {
-	return c.CreateContext(context.Background(), tokenReview)
-}
-
-func (c *tokenReviews) CreateContext(ctx context.Context, tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) {
-	result = &authenticationapi.TokenReview{}
-	err = c.client.Post().
-		Context(ctx).
-		Resource("tokenreviews").
-		Body(tokenReview).
-		Do().
-		Into(result)
-	return
-}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go
index f6df7696..60bf15ab 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go
@@ -17,3 +17,5 @@ limitations under the License.
 // Code generated by client-gen. DO NOT EDIT.
 
 package v1beta1
+
+type TokenReviewExpansion interface{}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go
index 0ac3561e..5da12243 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go
@@ -19,6 +19,11 @@ limitations under the License.
 package v1beta1
 
 import (
+	"context"
+
+	v1beta1 "k8s.io/api/authentication/v1beta1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	scheme "k8s.io/client-go/kubernetes/scheme"
 	rest "k8s.io/client-go/rest"
 )
 
@@ -30,6 +35,7 @@ type TokenReviewsGetter interface {
 
 // TokenReviewInterface has methods to work with TokenReview resources.
 type TokenReviewInterface interface {
+	Create(ctx context.Context, tokenReview *v1beta1.TokenReview, opts v1.CreateOptions) (*v1beta1.TokenReview, error)
 	TokenReviewExpansion
 }
 
@@ -44,3 +50,15 @@ func newTokenReviews(c *AuthenticationV1beta1Client) *tokenReviews {
 		client: c.RESTClient(),
 	}
 }
+
+// Create takes the representation of a tokenReview and creates it.  Returns the server's representation of the tokenReview, and an error, if there is any.
+func (c *tokenReviews) Create(ctx context.Context, tokenReview *v1beta1.TokenReview, opts v1.CreateOptions) (result *v1beta1.TokenReview, err error) {
+	result = &v1beta1.TokenReview{}
+	err = c.client.Post().
+		Resource("tokenreviews").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Body(tokenReview).
+		Do(ctx).
+		Into(result)
+	return
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview_expansion.go
deleted file mode 100644
index 0476b173..00000000
--- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview_expansion.go
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1beta1
-
-import (
-	"context"
-
-	authenticationapi "k8s.io/api/authentication/v1beta1"
-)
-
-type TokenReviewExpansion interface {
-	Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error)
-	CreateContext(ctx context.Context, tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error)
-}
-
-func (c *tokenReviews) Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) {
-	return c.CreateContext(context.Background(), tokenReview)
-}
-
-func (c *tokenReviews) CreateContext(ctx context.Context, tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) {
-	result = &authenticationapi.TokenReview{}
-	err = c.client.Post().
-		Context(ctx).
-		Resource("tokenreviews").
-		Body(tokenReview).
-		Do().
-		Into(result)
-	return
-}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/generated_expansion.go
index 177209ec..fe8c72cd 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/generated_expansion.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/generated_expansion.go
@@ -17,3 +17,11 @@ limitations under the License.
 // Code generated by client-gen. DO NOT EDIT.
 
 package v1
+
+type LocalSubjectAccessReviewExpansion interface{}
+
+type SelfSubjectAccessReviewExpansion interface{}
+
+type SelfSubjectRulesReviewExpansion interface{}
+
+type SubjectAccessReviewExpansion interface{}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go
index 0292c786..84b2efe1 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go
@@ -19,6 +19,11 @@ limitations under the License.
 package v1
 
 import (
+	"context"
+
+	v1 "k8s.io/api/authorization/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	scheme "k8s.io/client-go/kubernetes/scheme"
 	rest "k8s.io/client-go/rest"
 )
 
@@ -30,6 +35,7 @@ type LocalSubjectAccessReviewsGetter interface {
 
 // LocalSubjectAccessReviewInterface has methods to work with LocalSubjectAccessReview resources.
 type LocalSubjectAccessReviewInterface interface {
+	Create(ctx context.Context, localSubjectAccessReview *v1.LocalSubjectAccessReview, opts metav1.CreateOptions) (*v1.LocalSubjectAccessReview, error)
 	LocalSubjectAccessReviewExpansion
 }
 
@@ -46,3 +52,16 @@ func newLocalSubjectAccessReviews(c *AuthorizationV1Client, namespace string) *l
 		ns:     namespace,
 	}
 }
+
+// Create takes the representation of a localSubjectAccessReview and creates it.  Returns the server's representation of the localSubjectAccessReview, and an error, if there is any.
+func (c *localSubjectAccessReviews) Create(ctx context.Context, localSubjectAccessReview *v1.LocalSubjectAccessReview, opts metav1.CreateOptions) (result *v1.LocalSubjectAccessReview, err error) {
+	result = &v1.LocalSubjectAccessReview{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("localsubjectaccessreviews").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Body(localSubjectAccessReview).
+		Do(ctx).
+		Into(result)
+	return
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview_expansion.go
deleted file mode 100644
index 9836308b..00000000
--- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview_expansion.go
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
-Copyright 2017 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1
-
-import (
-	"context"
-
-	authorizationapi "k8s.io/api/authorization/v1"
-)
-
-type LocalSubjectAccessReviewExpansion interface {
-	Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error)
-	CreateContext(ctx context.Context, sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error)
-}
-
-func (c *localSubjectAccessReviews) Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) {
-	return c.CreateContext(context.Background(), sar)
-}
-
-func (c *localSubjectAccessReviews) CreateContext(ctx context.Context, sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) {
-	result = &authorizationapi.LocalSubjectAccessReview{}
-	err = c.client.Post().
-		Context(ctx).
-		Namespace(c.ns).
-		Resource("localsubjectaccessreviews").
-		Body(sar).
-		Do().
-		Into(result)
-	return
-}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go
index 1e3a4581..2006196c 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go
@@ -19,6 +19,11 @@ limitations under the License.
 package v1
 
 import (
+	"context"
+
+	v1 "k8s.io/api/authorization/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	scheme "k8s.io/client-go/kubernetes/scheme"
 	rest "k8s.io/client-go/rest"
 )
 
@@ -30,6 +35,7 @@ type SelfSubjectAccessReviewsGetter interface {
 
 // SelfSubjectAccessReviewInterface has methods to work with SelfSubjectAccessReview resources.
 type SelfSubjectAccessReviewInterface interface {
+	Create(ctx context.Context, selfSubjectAccessReview *v1.SelfSubjectAccessReview, opts metav1.CreateOptions) (*v1.SelfSubjectAccessReview, error)
 	SelfSubjectAccessReviewExpansion
 }
 
@@ -44,3 +50,15 @@ func newSelfSubjectAccessReviews(c *AuthorizationV1Client) *selfSubjectAccessRev
 		client: c.RESTClient(),
 	}
 }
+
+// Create takes the representation of a selfSubjectAccessReview and creates it.  Returns the server's representation of the selfSubjectAccessReview, and an error, if there is any.
+func (c *selfSubjectAccessReviews) Create(ctx context.Context, selfSubjectAccessReview *v1.SelfSubjectAccessReview, opts metav1.CreateOptions) (result *v1.SelfSubjectAccessReview, err error) {
+	result = &v1.SelfSubjectAccessReview{}
+	err = c.client.Post().
+		Resource("selfsubjectaccessreviews").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Body(selfSubjectAccessReview).
+		Do(ctx).
+		Into(result)
+	return
+}
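
As with TokenReview, the Create/CreateContext pair from the deleted expansion file collapses into this single generated Create. A hedged example of the common "can the current user do X" check built on it (the verb, resource, and namespace are illustrative):

    package example

    import (
    	"context"

    	authzv1 "k8s.io/api/authorization/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    )

    // canIListPods reports whether the current credentials may list pods in the namespace.
    func canIListPods(ctx context.Context, cs kubernetes.Interface, namespace string) (bool, error) {
    	ssar := &authzv1.SelfSubjectAccessReview{
    		Spec: authzv1.SelfSubjectAccessReviewSpec{
    			ResourceAttributes: &authzv1.ResourceAttributes{
    				Namespace: namespace,
    				Verb:      "list",
    				Resource:  "pods",
    			},
    		},
    	}
    	res, err := cs.AuthorizationV1().SelfSubjectAccessReviews().Create(ctx, ssar, metav1.CreateOptions{})
    	if err != nil {
    		return false, err
    	}
    	return res.Status.Allowed, nil
    }
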
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview_expansion.go
deleted file mode 100644
index 916e5b43..00000000
--- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview_expansion.go
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
-Copyright 2017 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1
-
-import (
-	"context"
-
-	authorizationapi "k8s.io/api/authorization/v1"
-)
-
-type SelfSubjectAccessReviewExpansion interface {
-	Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error)
-	CreateContext(ctx context.Context, sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error)
-}
-
-func (c *selfSubjectAccessReviews) Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) {
-	return c.CreateContext(context.Background(), sar)
-}
-
-func (c *selfSubjectAccessReviews) CreateContext(ctx context.Context, sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) {
-	result = &authorizationapi.SelfSubjectAccessReview{}
-	err = c.client.Post().
-		Context(ctx).
-		Resource("selfsubjectaccessreviews").
-		Body(sar).
-		Do().
-		Into(result)
-	return
-}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go
index 50a0233e..25d99f7b 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go
@@ -19,6 +19,11 @@ limitations under the License.
 package v1
 
 import (
+	"context"
+
+	v1 "k8s.io/api/authorization/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	scheme "k8s.io/client-go/kubernetes/scheme"
 	rest "k8s.io/client-go/rest"
 )
 
@@ -30,6 +35,7 @@ type SelfSubjectRulesReviewsGetter interface {
 
 // SelfSubjectRulesReviewInterface has methods to work with SelfSubjectRulesReview resources.
 type SelfSubjectRulesReviewInterface interface {
+	Create(ctx context.Context, selfSubjectRulesReview *v1.SelfSubjectRulesReview, opts metav1.CreateOptions) (*v1.SelfSubjectRulesReview, error)
 	SelfSubjectRulesReviewExpansion
 }
 
@@ -44,3 +50,15 @@ func newSelfSubjectRulesReviews(c *AuthorizationV1Client) *selfSubjectRulesRevie
 		client: c.RESTClient(),
 	}
 }
+
+// Create takes the representation of a selfSubjectRulesReview and creates it.  Returns the server's representation of the selfSubjectRulesReview, and an error, if there is any.
+func (c *selfSubjectRulesReviews) Create(ctx context.Context, selfSubjectRulesReview *v1.SelfSubjectRulesReview, opts metav1.CreateOptions) (result *v1.SelfSubjectRulesReview, err error) {
+	result = &v1.SelfSubjectRulesReview{}
+	err = c.client.Post().
+		Resource("selfsubjectrulesreviews").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Body(selfSubjectRulesReview).
+		Do(ctx).
+		Into(result)
+	return
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview_expansion.go
deleted file mode 100644
index 365282ed..00000000
--- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview_expansion.go
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
-Copyright 2017 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1
-
-import (
-	"context"
-
-	authorizationapi "k8s.io/api/authorization/v1"
-)
-
-type SelfSubjectRulesReviewExpansion interface {
-	Create(srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error)
-	CreateContext(ctx context.Context, srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error)
-}
-
-func (c *selfSubjectRulesReviews) Create(srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) {
-	return c.CreateContext(context.Background(), srr)
-}
-
-func (c *selfSubjectRulesReviews) CreateContext(ctx context.Context, srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) {
-	result = &authorizationapi.SelfSubjectRulesReview{}
-	err = c.client.Post().
-		Context(ctx).
-		Resource("selfsubjectrulesreviews").
-		Body(srr).
-		Do().
-		Into(result)
-	return
-}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go
index 9c09008c..8ac0566a 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go
@@ -19,6 +19,11 @@ limitations under the License.
 package v1
 
 import (
+	"context"
+
+	v1 "k8s.io/api/authorization/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	scheme "k8s.io/client-go/kubernetes/scheme"
 	rest "k8s.io/client-go/rest"
 )
 
@@ -30,6 +35,7 @@ type SubjectAccessReviewsGetter interface {
 
 // SubjectAccessReviewInterface has methods to work with SubjectAccessReview resources.
 type SubjectAccessReviewInterface interface {
+	Create(ctx context.Context, subjectAccessReview *v1.SubjectAccessReview, opts metav1.CreateOptions) (*v1.SubjectAccessReview, error)
 	SubjectAccessReviewExpansion
 }
 
@@ -44,3 +50,15 @@ func newSubjectAccessReviews(c *AuthorizationV1Client) *subjectAccessReviews {
 		client: c.RESTClient(),
 	}
 }
+
+// Create takes the representation of a subjectAccessReview and creates it.  Returns the server's representation of the subjectAccessReview, and an error, if there is any.
+func (c *subjectAccessReviews) Create(ctx context.Context, subjectAccessReview *v1.SubjectAccessReview, opts metav1.CreateOptions) (result *v1.SubjectAccessReview, err error) {
+	result = &v1.SubjectAccessReview{}
+	err = c.client.Post().
+		Resource("subjectaccessreviews").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Body(subjectAccessReview).
+		Do(ctx).
+		Into(result)
+	return
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview_expansion.go
deleted file mode 100644
index 927544f1..00000000
--- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview_expansion.go
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
-Copyright 2017 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1
-
-import (
-	"context"
-
-	authorizationapi "k8s.io/api/authorization/v1"
-)
-
-// The SubjectAccessReviewExpansion interface allows manually adding extra methods to the AuthorizationInterface.
-type SubjectAccessReviewExpansion interface {
-	Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error)
-	CreateContext(ctx context.Context, sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error)
-}
-
-func (c *subjectAccessReviews) Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) {
-	return c.CreateContext(context.Background(), sar)
-}
-
-func (c *subjectAccessReviews) CreateContext(ctx context.Context, sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) {
-	result = &authorizationapi.SubjectAccessReview{}
-	err = c.client.Post().
-		Context(ctx).
-		Resource("subjectaccessreviews").
-		Body(sar).
-		Do().
-		Into(result)
-	return
-}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/generated_expansion.go
index f6df7696..ae238830 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/generated_expansion.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/generated_expansion.go
@@ -17,3 +17,11 @@ limitations under the License.
 // Code generated by client-gen. DO NOT EDIT.
 
 package v1beta1
+
+type LocalSubjectAccessReviewExpansion interface{}
+
+type SelfSubjectAccessReviewExpansion interface{}
+
+type SelfSubjectRulesReviewExpansion interface{}
+
+type SubjectAccessReviewExpansion interface{}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go
index f5e86a76..78584ba9 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go
@@ -19,6 +19,11 @@ limitations under the License.
 package v1beta1
 
 import (
+	"context"
+
+	v1beta1 "k8s.io/api/authorization/v1beta1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	scheme "k8s.io/client-go/kubernetes/scheme"
 	rest "k8s.io/client-go/rest"
 )
 
@@ -30,6 +35,7 @@ type LocalSubjectAccessReviewsGetter interface {
 
 // LocalSubjectAccessReviewInterface has methods to work with LocalSubjectAccessReview resources.
 type LocalSubjectAccessReviewInterface interface {
+	Create(ctx context.Context, localSubjectAccessReview *v1beta1.LocalSubjectAccessReview, opts v1.CreateOptions) (*v1beta1.LocalSubjectAccessReview, error)
 	LocalSubjectAccessReviewExpansion
 }
 
@@ -46,3 +52,16 @@ func newLocalSubjectAccessReviews(c *AuthorizationV1beta1Client, namespace strin
 		ns:     namespace,
 	}
 }
+
+// Create takes the representation of a localSubjectAccessReview and creates it.  Returns the server's representation of the localSubjectAccessReview, and an error, if there is any.
+func (c *localSubjectAccessReviews) Create(ctx context.Context, localSubjectAccessReview *v1beta1.LocalSubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.LocalSubjectAccessReview, err error) {
+	result = &v1beta1.LocalSubjectAccessReview{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("localsubjectaccessreviews").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Body(localSubjectAccessReview).
+		Do(ctx).
+		Into(result)
+	return
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go
deleted file mode 100644
index 148cf628..00000000
--- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1beta1
-
-import (
-	"context"
-
-	authorizationapi "k8s.io/api/authorization/v1beta1"
-)
-
-type LocalSubjectAccessReviewExpansion interface {
-	Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error)
-	CreateContext(ctx context.Context, sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error)
-}
-
-func (c *localSubjectAccessReviews) Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) {
-	return c.CreateContext(context.Background(), sar)
-}
-
-func (c *localSubjectAccessReviews) CreateContext(ctx context.Context, sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) {
-	result = &authorizationapi.LocalSubjectAccessReview{}
-	err = c.client.Post().
-		Context(ctx).
-		Namespace(c.ns).
-		Resource("localsubjectaccessreviews").
-		Body(sar).
-		Do().
-		Into(result)
-	return
-}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go
index 906712cc..0286c93f 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go
@@ -19,6 +19,11 @@ limitations under the License.
 package v1beta1
 
 import (
+	"context"
+
+	v1beta1 "k8s.io/api/authorization/v1beta1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	scheme "k8s.io/client-go/kubernetes/scheme"
 	rest "k8s.io/client-go/rest"
 )
 
@@ -30,6 +35,7 @@ type SelfSubjectAccessReviewsGetter interface {
 
 // SelfSubjectAccessReviewInterface has methods to work with SelfSubjectAccessReview resources.
 type SelfSubjectAccessReviewInterface interface {
+	Create(ctx context.Context, selfSubjectAccessReview *v1beta1.SelfSubjectAccessReview, opts v1.CreateOptions) (*v1beta1.SelfSubjectAccessReview, error)
 	SelfSubjectAccessReviewExpansion
 }
 
@@ -44,3 +50,15 @@ func newSelfSubjectAccessReviews(c *AuthorizationV1beta1Client) *selfSubjectAcce
 		client: c.RESTClient(),
 	}
 }
+
+// Create takes the representation of a selfSubjectAccessReview and creates it.  Returns the server's representation of the selfSubjectAccessReview, and an error, if there is any.
+func (c *selfSubjectAccessReviews) Create(ctx context.Context, selfSubjectAccessReview *v1beta1.SelfSubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.SelfSubjectAccessReview, err error) {
+	result = &v1beta1.SelfSubjectAccessReview{}
+	err = c.client.Post().
+		Resource("selfsubjectaccessreviews").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Body(selfSubjectAccessReview).
+		Do(ctx).
+		Into(result)
+	return
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go
deleted file mode 100644
index 6edead0e..00000000
--- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1beta1
-
-import (
-	"context"
-
-	authorizationapi "k8s.io/api/authorization/v1beta1"
-)
-
-type SelfSubjectAccessReviewExpansion interface {
-	Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error)
-	CreateContext(ctx context.Context, sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error)
-}
-
-func (c *selfSubjectAccessReviews) Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) {
-	return c.CreateContext(context.Background(), sar)
-}
-
-func (c *selfSubjectAccessReviews) CreateContext(ctx context.Context, sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) {
-	result = &authorizationapi.SelfSubjectAccessReview{}
-	err = c.client.Post().
-		Context(ctx).
-		Resource("selfsubjectaccessreviews").
-		Body(sar).
-		Do().
-		Into(result)
-	return
-}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go
index 56c0f99d..d772973e 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go
@@ -19,6 +19,11 @@ limitations under the License.
 package v1beta1
 
 import (
+	"context"
+
+	v1beta1 "k8s.io/api/authorization/v1beta1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	scheme "k8s.io/client-go/kubernetes/scheme"
 	rest "k8s.io/client-go/rest"
 )
 
@@ -30,6 +35,7 @@ type SelfSubjectRulesReviewsGetter interface {
 
 // SelfSubjectRulesReviewInterface has methods to work with SelfSubjectRulesReview resources.
 type SelfSubjectRulesReviewInterface interface {
+	Create(ctx context.Context, selfSubjectRulesReview *v1beta1.SelfSubjectRulesReview, opts v1.CreateOptions) (*v1beta1.SelfSubjectRulesReview, error)
 	SelfSubjectRulesReviewExpansion
 }
 
@@ -44,3 +50,15 @@ func newSelfSubjectRulesReviews(c *AuthorizationV1beta1Client) *selfSubjectRules
 		client: c.RESTClient(),
 	}
 }
+
+// Create takes the representation of a selfSubjectRulesReview and creates it.  Returns the server's representation of the selfSubjectRulesReview, and an error, if there is any.
+func (c *selfSubjectRulesReviews) Create(ctx context.Context, selfSubjectRulesReview *v1beta1.SelfSubjectRulesReview, opts v1.CreateOptions) (result *v1beta1.SelfSubjectRulesReview, err error) {
+	result = &v1beta1.SelfSubjectRulesReview{}
+	err = c.client.Post().
+		Resource("selfsubjectrulesreviews").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Body(selfSubjectRulesReview).
+		Do(ctx).
+		Into(result)
+	return
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview_expansion.go
deleted file mode 100644
index a459d5c3..00000000
--- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview_expansion.go
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
-Copyright 2017 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1beta1
-
-import (
-	"context"
-
-	authorizationapi "k8s.io/api/authorization/v1beta1"
-)
-
-type SelfSubjectRulesReviewExpansion interface {
-	Create(srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error)
-	CreateContext(ctx context.Context, srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error)
-}
-
-func (c *selfSubjectRulesReviews) Create(srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) {
-	return c.CreateContext(context.Background(), srr)
-}
-
-func (c *selfSubjectRulesReviews) CreateContext(ctx context.Context, srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) {
-	result = &authorizationapi.SelfSubjectRulesReview{}
-	err = c.client.Post().
-		Context(ctx).
-		Resource("selfsubjectrulesreviews").
-		Body(srr).
-		Do().
-		Into(result)
-	return
-}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go
index 79f1ec53..aebe8398 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go
@@ -19,6 +19,11 @@ limitations under the License.
 package v1beta1
 
 import (
+	"context"
+
+	v1beta1 "k8s.io/api/authorization/v1beta1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	scheme "k8s.io/client-go/kubernetes/scheme"
 	rest "k8s.io/client-go/rest"
 )
 
@@ -30,6 +35,7 @@ type SubjectAccessReviewsGetter interface {
 
 // SubjectAccessReviewInterface has methods to work with SubjectAccessReview resources.
 type SubjectAccessReviewInterface interface {
+	Create(ctx context.Context, subjectAccessReview *v1beta1.SubjectAccessReview, opts v1.CreateOptions) (*v1beta1.SubjectAccessReview, error)
 	SubjectAccessReviewExpansion
 }
 
@@ -44,3 +50,15 @@ func newSubjectAccessReviews(c *AuthorizationV1beta1Client) *subjectAccessReview
 		client: c.RESTClient(),
 	}
 }
+
+// Create takes the representation of a subjectAccessReview and creates it.  Returns the server's representation of the subjectAccessReview, and an error, if there is any.
+func (c *subjectAccessReviews) Create(ctx context.Context, subjectAccessReview *v1beta1.SubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.SubjectAccessReview, err error) {
+	result = &v1beta1.SubjectAccessReview{}
+	err = c.client.Post().
+		Resource("subjectaccessreviews").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Body(subjectAccessReview).
+		Do(ctx).
+		Into(result)
+	return
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview_expansion.go
deleted file mode 100644
index 7072e29c..00000000
--- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview_expansion.go
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1beta1
-
-import (
-	"context"
-
-	authorizationapi "k8s.io/api/authorization/v1beta1"
-)
-
-// The SubjectAccessReviewExpansion interface allows manually adding extra methods to the AuthorizationInterface.
-type SubjectAccessReviewExpansion interface {
-	Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error)
-	CreateContext(ctx context.Context, sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error)
-}
-
-func (c *subjectAccessReviews) Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) {
-	return c.CreateContext(context.Background(), sar)
-}
-
-func (c *subjectAccessReviews) CreateContext(ctx context.Context, sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) {
-	result = &authorizationapi.SubjectAccessReview{}
-	err = c.client.Post().
-		Context(ctx).
-		Resource("subjectaccessreviews").
-		Body(sar).
-		Do().
-		Into(result)
-	return
-}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go
index 0e0839fb..ca8e0da8 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1
 
 import (
+	"context"
 	"time"
 
 	v1 "k8s.io/api/autoscaling/v1"
@@ -37,15 +38,15 @@ type HorizontalPodAutoscalersGetter interface {
 
 // HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources.
 type HorizontalPodAutoscalerInterface interface {
-	Create(*v1.HorizontalPodAutoscaler) (*v1.HorizontalPodAutoscaler, error)
-	Update(*v1.HorizontalPodAutoscaler) (*v1.HorizontalPodAutoscaler, error)
-	UpdateStatus(*v1.HorizontalPodAutoscaler) (*v1.HorizontalPodAutoscaler, error)
-	Delete(name string, options *metav1.DeleteOptions) error
-	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
-	Get(name string, options metav1.GetOptions) (*v1.HorizontalPodAutoscaler, error)
-	List(opts metav1.ListOptions) (*v1.HorizontalPodAutoscalerList, error)
-	Watch(opts metav1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.HorizontalPodAutoscaler, err error)
+	Create(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.CreateOptions) (*v1.HorizontalPodAutoscaler, error)
+	Update(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (*v1.HorizontalPodAutoscaler, error)
+	UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (*v1.HorizontalPodAutoscaler, error)
+	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.HorizontalPodAutoscaler, error)
+	List(ctx context.Context, opts metav1.ListOptions) (*v1.HorizontalPodAutoscalerList, error)
+	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.HorizontalPodAutoscaler, err error)
 	HorizontalPodAutoscalerExpansion
 }
 
@@ -64,20 +65,20 @@ func newHorizontalPodAutoscalers(c *AutoscalingV1Client, namespace string) *hori
 }
 
 // Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any.
-func (c *horizontalPodAutoscalers) Get(name string, options metav1.GetOptions) (result *v1.HorizontalPodAutoscaler, err error) {
+func (c *horizontalPodAutoscalers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.HorizontalPodAutoscaler, err error) {
 	result = &v1.HorizontalPodAutoscaler{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("horizontalpodautoscalers").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors.
-func (c *horizontalPodAutoscalers) List(opts metav1.ListOptions) (result *v1.HorizontalPodAutoscalerList, err error) {
+func (c *horizontalPodAutoscalers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.HorizontalPodAutoscalerList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -88,13 +89,13 @@ func (c *horizontalPodAutoscalers) List(opts metav1.ListOptions) (result *v1.Hor
 		Resource("horizontalpodautoscalers").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers.
-func (c *horizontalPodAutoscalers) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+func (c *horizontalPodAutoscalers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -105,87 +106,90 @@ func (c *horizontalPodAutoscalers) Watch(opts metav1.ListOptions) (watch.Interfa
 		Resource("horizontalpodautoscalers").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a horizontalPodAutoscaler and creates it.  Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any.
-func (c *horizontalPodAutoscalers) Create(horizontalPodAutoscaler *v1.HorizontalPodAutoscaler) (result *v1.HorizontalPodAutoscaler, err error) {
+func (c *horizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.CreateOptions) (result *v1.HorizontalPodAutoscaler, err error) {
 	result = &v1.HorizontalPodAutoscaler{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("horizontalpodautoscalers").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(horizontalPodAutoscaler).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any.
-func (c *horizontalPodAutoscalers) Update(horizontalPodAutoscaler *v1.HorizontalPodAutoscaler) (result *v1.HorizontalPodAutoscaler, err error) {
+func (c *horizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (result *v1.HorizontalPodAutoscaler, err error) {
 	result = &v1.HorizontalPodAutoscaler{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("horizontalpodautoscalers").
 		Name(horizontalPodAutoscaler.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(horizontalPodAutoscaler).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // UpdateStatus was generated because the type contains a Status member.
 // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-
-func (c *horizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *v1.HorizontalPodAutoscaler) (result *v1.HorizontalPodAutoscaler, err error) {
+func (c *horizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v1.HorizontalPodAutoscaler, opts metav1.UpdateOptions) (result *v1.HorizontalPodAutoscaler, err error) {
 	result = &v1.HorizontalPodAutoscaler{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("horizontalpodautoscalers").
 		Name(horizontalPodAutoscaler.Name).
 		SubResource("status").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(horizontalPodAutoscaler).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs.
-func (c *horizontalPodAutoscalers) Delete(name string, options *metav1.DeleteOptions) error {
+func (c *horizontalPodAutoscalers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("horizontalpodautoscalers").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *horizontalPodAutoscalers) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+func (c *horizontalPodAutoscalers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("horizontalpodautoscalers").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched horizontalPodAutoscaler.
-func (c *horizontalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.HorizontalPodAutoscaler, err error) {
+func (c *horizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.HorizontalPodAutoscaler, err error) {
 	result = &v1.HorizontalPodAutoscaler{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("horizontalpodautoscalers").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
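
Every generated client in this vendor bump changes the same way: each verb gains a leading context.Context, an explicit options struct, and the request builder's Do()/Watch() now take the context. A minimal caller-side sketch of the read path, assuming an existing *kubernetes.Clientset and a namespace name (both hypothetical, not part of this patch), matching the interface shown in the hunks above:

package main

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// listHPAs shows the post-update call pattern: context first, options last.
func listHPAs(cs *kubernetes.Clientset, namespace string) error {
	// The context now bounds the request; cancelling it aborts the HTTP call.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	hpas, err := cs.AutoscalingV1().HorizontalPodAutoscalers(namespace).
		List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	for _, hpa := range hpas.Items {
		fmt.Println(hpa.Name)
	}
	return nil
}
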
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go
index 02d5cfb9..f1637c1b 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v2beta1
 
 import (
+	"context"
 	"time"
 
 	v2beta1 "k8s.io/api/autoscaling/v2beta1"
@@ -37,15 +38,15 @@ type HorizontalPodAutoscalersGetter interface {
 
 // HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources.
 type HorizontalPodAutoscalerInterface interface {
-	Create(*v2beta1.HorizontalPodAutoscaler) (*v2beta1.HorizontalPodAutoscaler, error)
-	Update(*v2beta1.HorizontalPodAutoscaler) (*v2beta1.HorizontalPodAutoscaler, error)
-	UpdateStatus(*v2beta1.HorizontalPodAutoscaler) (*v2beta1.HorizontalPodAutoscaler, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v2beta1.HorizontalPodAutoscaler, error)
-	List(opts v1.ListOptions) (*v2beta1.HorizontalPodAutoscalerList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2beta1.HorizontalPodAutoscaler, err error)
+	Create(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.CreateOptions) (*v2beta1.HorizontalPodAutoscaler, error)
+	Update(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta1.HorizontalPodAutoscaler, error)
+	UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta1.HorizontalPodAutoscaler, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v2beta1.HorizontalPodAutoscaler, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v2beta1.HorizontalPodAutoscalerList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta1.HorizontalPodAutoscaler, err error)
 	HorizontalPodAutoscalerExpansion
 }
 
@@ -64,20 +65,20 @@ func newHorizontalPodAutoscalers(c *AutoscalingV2beta1Client, namespace string)
 }
 
 // Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any.
-func (c *horizontalPodAutoscalers) Get(name string, options v1.GetOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) {
+func (c *horizontalPodAutoscalers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) {
 	result = &v2beta1.HorizontalPodAutoscaler{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("horizontalpodautoscalers").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors.
-func (c *horizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2beta1.HorizontalPodAutoscalerList, err error) {
+func (c *horizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOptions) (result *v2beta1.HorizontalPodAutoscalerList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -88,13 +89,13 @@ func (c *horizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2beta1.Ho
 		Resource("horizontalpodautoscalers").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers.
-func (c *horizontalPodAutoscalers) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *horizontalPodAutoscalers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -105,87 +106,90 @@ func (c *horizontalPodAutoscalers) Watch(opts v1.ListOptions) (watch.Interface,
 		Resource("horizontalpodautoscalers").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a horizontalPodAutoscaler and creates it.  Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any.
-func (c *horizontalPodAutoscalers) Create(horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler) (result *v2beta1.HorizontalPodAutoscaler, err error) {
+func (c *horizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.CreateOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) {
 	result = &v2beta1.HorizontalPodAutoscaler{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("horizontalpodautoscalers").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(horizontalPodAutoscaler).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any.
-func (c *horizontalPodAutoscalers) Update(horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler) (result *v2beta1.HorizontalPodAutoscaler, err error) {
+func (c *horizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) {
 	result = &v2beta1.HorizontalPodAutoscaler{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("horizontalpodautoscalers").
 		Name(horizontalPodAutoscaler.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(horizontalPodAutoscaler).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // UpdateStatus was generated because the type contains a Status member.
 // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-
-func (c *horizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler) (result *v2beta1.HorizontalPodAutoscaler, err error) {
+func (c *horizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) {
 	result = &v2beta1.HorizontalPodAutoscaler{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("horizontalpodautoscalers").
 		Name(horizontalPodAutoscaler.Name).
 		SubResource("status").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(horizontalPodAutoscaler).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs.
-func (c *horizontalPodAutoscalers) Delete(name string, options *v1.DeleteOptions) error {
+func (c *horizontalPodAutoscalers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("horizontalpodautoscalers").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *horizontalPodAutoscalers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *horizontalPodAutoscalers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("horizontalpodautoscalers").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched horizontalPodAutoscaler.
-func (c *horizontalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2beta1.HorizontalPodAutoscaler, err error) {
+func (c *horizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta1.HorizontalPodAutoscaler, err error) {
 	result = &v2beta1.HorizontalPodAutoscaler{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("horizontalpodautoscalers").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go
index 91a0fa64..c7fad108 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v2beta2
 
 import (
+	"context"
 	"time"
 
 	v2beta2 "k8s.io/api/autoscaling/v2beta2"
@@ -37,15 +38,15 @@ type HorizontalPodAutoscalersGetter interface {
 
 // HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources.
 type HorizontalPodAutoscalerInterface interface {
-	Create(*v2beta2.HorizontalPodAutoscaler) (*v2beta2.HorizontalPodAutoscaler, error)
-	Update(*v2beta2.HorizontalPodAutoscaler) (*v2beta2.HorizontalPodAutoscaler, error)
-	UpdateStatus(*v2beta2.HorizontalPodAutoscaler) (*v2beta2.HorizontalPodAutoscaler, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v2beta2.HorizontalPodAutoscaler, error)
-	List(opts v1.ListOptions) (*v2beta2.HorizontalPodAutoscalerList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2beta2.HorizontalPodAutoscaler, err error)
+	Create(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.CreateOptions) (*v2beta2.HorizontalPodAutoscaler, error)
+	Update(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta2.HorizontalPodAutoscaler, error)
+	UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (*v2beta2.HorizontalPodAutoscaler, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v2beta2.HorizontalPodAutoscaler, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v2beta2.HorizontalPodAutoscalerList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta2.HorizontalPodAutoscaler, err error)
 	HorizontalPodAutoscalerExpansion
 }
 
@@ -64,20 +65,20 @@ func newHorizontalPodAutoscalers(c *AutoscalingV2beta2Client, namespace string)
 }
 
 // Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any.
-func (c *horizontalPodAutoscalers) Get(name string, options v1.GetOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) {
+func (c *horizontalPodAutoscalers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) {
 	result = &v2beta2.HorizontalPodAutoscaler{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("horizontalpodautoscalers").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors.
-func (c *horizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2beta2.HorizontalPodAutoscalerList, err error) {
+func (c *horizontalPodAutoscalers) List(ctx context.Context, opts v1.ListOptions) (result *v2beta2.HorizontalPodAutoscalerList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -88,13 +89,13 @@ func (c *horizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2beta2.Ho
 		Resource("horizontalpodautoscalers").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers.
-func (c *horizontalPodAutoscalers) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *horizontalPodAutoscalers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -105,87 +106,90 @@ func (c *horizontalPodAutoscalers) Watch(opts v1.ListOptions) (watch.Interface,
 		Resource("horizontalpodautoscalers").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a horizontalPodAutoscaler and creates it.  Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any.
-func (c *horizontalPodAutoscalers) Create(horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler) (result *v2beta2.HorizontalPodAutoscaler, err error) {
+func (c *horizontalPodAutoscalers) Create(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.CreateOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) {
 	result = &v2beta2.HorizontalPodAutoscaler{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("horizontalpodautoscalers").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(horizontalPodAutoscaler).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any.
-func (c *horizontalPodAutoscalers) Update(horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler) (result *v2beta2.HorizontalPodAutoscaler, err error) {
+func (c *horizontalPodAutoscalers) Update(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) {
 	result = &v2beta2.HorizontalPodAutoscaler{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("horizontalpodautoscalers").
 		Name(horizontalPodAutoscaler.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(horizontalPodAutoscaler).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // UpdateStatus was generated because the type contains a Status member.
 // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-
-func (c *horizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler) (result *v2beta2.HorizontalPodAutoscaler, err error) {
+func (c *horizontalPodAutoscalers) UpdateStatus(ctx context.Context, horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler, opts v1.UpdateOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) {
 	result = &v2beta2.HorizontalPodAutoscaler{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("horizontalpodautoscalers").
 		Name(horizontalPodAutoscaler.Name).
 		SubResource("status").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(horizontalPodAutoscaler).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs.
-func (c *horizontalPodAutoscalers) Delete(name string, options *v1.DeleteOptions) error {
+func (c *horizontalPodAutoscalers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("horizontalpodautoscalers").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *horizontalPodAutoscalers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *horizontalPodAutoscalers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("horizontalpodautoscalers").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched horizontalPodAutoscaler.
-func (c *horizontalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2beta2.HorizontalPodAutoscaler, err error) {
+func (c *horizontalPodAutoscalers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2beta2.HorizontalPodAutoscaler, err error) {
 	result = &v2beta2.HorizontalPodAutoscaler{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("horizontalpodautoscalers").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
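
The delete verbs follow the same shape, with DeleteOptions now passed by value and the list options renamed to listOpts. A hedged sketch against the v2beta2 client above; the propagation policy and label selector are illustrative placeholders, not values taken from this patch:

package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// cleanupHPAs deletes one autoscaler in the foreground, then removes any
// remaining ones matching a label selector, using the value-typed options.
func cleanupHPAs(ctx context.Context, cs *kubernetes.Clientset, ns, name string) error {
	policy := metav1.DeletePropagationForeground
	if err := cs.AutoscalingV2beta2().HorizontalPodAutoscalers(ns).
		Delete(ctx, name, metav1.DeleteOptions{PropagationPolicy: &policy}); err != nil {
		return err
	}
	return cs.AutoscalingV2beta2().HorizontalPodAutoscalers(ns).
		DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "app=example"})
}
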
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go
index b55c602b..a20c8e0e 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1
 
 import (
+	"context"
 	"time"
 
 	v1 "k8s.io/api/batch/v1"
@@ -37,15 +38,15 @@ type JobsGetter interface {
 
 // JobInterface has methods to work with Job resources.
 type JobInterface interface {
-	Create(*v1.Job) (*v1.Job, error)
-	Update(*v1.Job) (*v1.Job, error)
-	UpdateStatus(*v1.Job) (*v1.Job, error)
-	Delete(name string, options *metav1.DeleteOptions) error
-	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
-	Get(name string, options metav1.GetOptions) (*v1.Job, error)
-	List(opts metav1.ListOptions) (*v1.JobList, error)
-	Watch(opts metav1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Job, err error)
+	Create(ctx context.Context, job *v1.Job, opts metav1.CreateOptions) (*v1.Job, error)
+	Update(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (*v1.Job, error)
+	UpdateStatus(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (*v1.Job, error)
+	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Job, error)
+	List(ctx context.Context, opts metav1.ListOptions) (*v1.JobList, error)
+	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Job, err error)
 	JobExpansion
 }
 
@@ -64,20 +65,20 @@ func newJobs(c *BatchV1Client, namespace string) *jobs {
 }
 
 // Get takes name of the job, and returns the corresponding job object, and an error if there is any.
-func (c *jobs) Get(name string, options metav1.GetOptions) (result *v1.Job, err error) {
+func (c *jobs) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Job, err error) {
 	result = &v1.Job{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("jobs").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of Jobs that match those selectors.
-func (c *jobs) List(opts metav1.ListOptions) (result *v1.JobList, err error) {
+func (c *jobs) List(ctx context.Context, opts metav1.ListOptions) (result *v1.JobList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -88,13 +89,13 @@ func (c *jobs) List(opts metav1.ListOptions) (result *v1.JobList, err error) {
 		Resource("jobs").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested jobs.
-func (c *jobs) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+func (c *jobs) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -105,87 +106,90 @@ func (c *jobs) Watch(opts metav1.ListOptions) (watch.Interface, error) {
 		Resource("jobs").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a job and creates it.  Returns the server's representation of the job, and an error, if there is any.
-func (c *jobs) Create(job *v1.Job) (result *v1.Job, err error) {
+func (c *jobs) Create(ctx context.Context, job *v1.Job, opts metav1.CreateOptions) (result *v1.Job, err error) {
 	result = &v1.Job{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("jobs").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(job).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a job and updates it. Returns the server's representation of the job, and an error, if there is any.
-func (c *jobs) Update(job *v1.Job) (result *v1.Job, err error) {
+func (c *jobs) Update(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (result *v1.Job, err error) {
 	result = &v1.Job{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("jobs").
 		Name(job.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(job).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // UpdateStatus was generated because the type contains a Status member.
 // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-
-func (c *jobs) UpdateStatus(job *v1.Job) (result *v1.Job, err error) {
+func (c *jobs) UpdateStatus(ctx context.Context, job *v1.Job, opts metav1.UpdateOptions) (result *v1.Job, err error) {
 	result = &v1.Job{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("jobs").
 		Name(job.Name).
 		SubResource("status").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(job).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the job and deletes it. Returns an error if one occurs.
-func (c *jobs) Delete(name string, options *metav1.DeleteOptions) error {
+func (c *jobs) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("jobs").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *jobs) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+func (c *jobs) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("jobs").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched job.
-func (c *jobs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Job, err error) {
+func (c *jobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Job, err error) {
 	result = &v1.Job{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("jobs").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
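
For the write verbs the options struct is encoded through VersionedParams, as the added lines above show. A sketch of creating and then patching a Job against the updated batch/v1 client; the job value and the patch payload are placeholders supplied by the caller, not content of this patch:

package main

import (
	"context"

	batchv1 "k8s.io/api/batch/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
)

// createAndLabelJob creates a Job, then adds a label via a strategic merge
// patch, using the context-aware signatures introduced by this vendor bump.
func createAndLabelJob(ctx context.Context, cs *kubernetes.Clientset, ns string, job *batchv1.Job) (*batchv1.Job, error) {
	created, err := cs.BatchV1().Jobs(ns).Create(ctx, job, metav1.CreateOptions{})
	if err != nil {
		return nil, err
	}
	patch := []byte(`{"metadata":{"labels":{"patched":"true"}}}`)
	return cs.BatchV1().Jobs(ns).Patch(ctx, created.Name, types.StrategicMergePatchType, patch, metav1.PatchOptions{})
}
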
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go
index d89d2fa2..07652029 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1beta1
 
 import (
+	"context"
 	"time"
 
 	v1beta1 "k8s.io/api/batch/v1beta1"
@@ -37,15 +38,15 @@ type CronJobsGetter interface {
 
 // CronJobInterface has methods to work with CronJob resources.
 type CronJobInterface interface {
-	Create(*v1beta1.CronJob) (*v1beta1.CronJob, error)
-	Update(*v1beta1.CronJob) (*v1beta1.CronJob, error)
-	UpdateStatus(*v1beta1.CronJob) (*v1beta1.CronJob, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1beta1.CronJob, error)
-	List(opts v1.ListOptions) (*v1beta1.CronJobList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CronJob, err error)
+	Create(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.CreateOptions) (*v1beta1.CronJob, error)
+	Update(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (*v1beta1.CronJob, error)
+	UpdateStatus(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (*v1beta1.CronJob, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.CronJob, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1beta1.CronJobList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CronJob, err error)
 	CronJobExpansion
 }
 
@@ -64,20 +65,20 @@ func newCronJobs(c *BatchV1beta1Client, namespace string) *cronJobs {
 }
 
 // Get takes name of the cronJob, and returns the corresponding cronJob object, and an error if there is any.
-func (c *cronJobs) Get(name string, options v1.GetOptions) (result *v1beta1.CronJob, err error) {
+func (c *cronJobs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CronJob, err error) {
 	result = &v1beta1.CronJob{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("cronjobs").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of CronJobs that match those selectors.
-func (c *cronJobs) List(opts v1.ListOptions) (result *v1beta1.CronJobList, err error) {
+func (c *cronJobs) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CronJobList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -88,13 +89,13 @@ func (c *cronJobs) List(opts v1.ListOptions) (result *v1beta1.CronJobList, err e
 		Resource("cronjobs").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested cronJobs.
-func (c *cronJobs) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *cronJobs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -105,87 +106,90 @@ func (c *cronJobs) Watch(opts v1.ListOptions) (watch.Interface, error) {
 		Resource("cronjobs").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a cronJob and creates it.  Returns the server's representation of the cronJob, and an error, if there is any.
-func (c *cronJobs) Create(cronJob *v1beta1.CronJob) (result *v1beta1.CronJob, err error) {
+func (c *cronJobs) Create(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.CreateOptions) (result *v1beta1.CronJob, err error) {
 	result = &v1beta1.CronJob{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("cronjobs").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(cronJob).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a cronJob and updates it. Returns the server's representation of the cronJob, and an error, if there is any.
-func (c *cronJobs) Update(cronJob *v1beta1.CronJob) (result *v1beta1.CronJob, err error) {
+func (c *cronJobs) Update(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (result *v1beta1.CronJob, err error) {
 	result = &v1beta1.CronJob{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("cronjobs").
 		Name(cronJob.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(cronJob).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // UpdateStatus was generated because the type contains a Status member.
 // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-
-func (c *cronJobs) UpdateStatus(cronJob *v1beta1.CronJob) (result *v1beta1.CronJob, err error) {
+func (c *cronJobs) UpdateStatus(ctx context.Context, cronJob *v1beta1.CronJob, opts v1.UpdateOptions) (result *v1beta1.CronJob, err error) {
 	result = &v1beta1.CronJob{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("cronjobs").
 		Name(cronJob.Name).
 		SubResource("status").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(cronJob).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the cronJob and deletes it. Returns an error if one occurs.
-func (c *cronJobs) Delete(name string, options *v1.DeleteOptions) error {
+func (c *cronJobs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("cronjobs").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *cronJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *cronJobs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("cronjobs").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched cronJob.
-func (c *cronJobs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CronJob, err error) {
+func (c *cronJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CronJob, err error) {
 	result = &v1beta1.CronJob{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("cronjobs").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go
index 19123b60..a25054f2 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v2alpha1
 
 import (
+	"context"
 	"time"
 
 	v2alpha1 "k8s.io/api/batch/v2alpha1"
@@ -37,15 +38,15 @@ type CronJobsGetter interface {
 
 // CronJobInterface has methods to work with CronJob resources.
 type CronJobInterface interface {
-	Create(*v2alpha1.CronJob) (*v2alpha1.CronJob, error)
-	Update(*v2alpha1.CronJob) (*v2alpha1.CronJob, error)
-	UpdateStatus(*v2alpha1.CronJob) (*v2alpha1.CronJob, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v2alpha1.CronJob, error)
-	List(opts v1.ListOptions) (*v2alpha1.CronJobList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2alpha1.CronJob, err error)
+	Create(ctx context.Context, cronJob *v2alpha1.CronJob, opts v1.CreateOptions) (*v2alpha1.CronJob, error)
+	Update(ctx context.Context, cronJob *v2alpha1.CronJob, opts v1.UpdateOptions) (*v2alpha1.CronJob, error)
+	UpdateStatus(ctx context.Context, cronJob *v2alpha1.CronJob, opts v1.UpdateOptions) (*v2alpha1.CronJob, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v2alpha1.CronJob, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v2alpha1.CronJobList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CronJob, err error)
 	CronJobExpansion
 }
 
@@ -64,20 +65,20 @@ func newCronJobs(c *BatchV2alpha1Client, namespace string) *cronJobs {
 }
 
 // Get takes name of the cronJob, and returns the corresponding cronJob object, and an error if there is any.
-func (c *cronJobs) Get(name string, options v1.GetOptions) (result *v2alpha1.CronJob, err error) {
+func (c *cronJobs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2alpha1.CronJob, err error) {
 	result = &v2alpha1.CronJob{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("cronjobs").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of CronJobs that match those selectors.
-func (c *cronJobs) List(opts v1.ListOptions) (result *v2alpha1.CronJobList, err error) {
+func (c *cronJobs) List(ctx context.Context, opts v1.ListOptions) (result *v2alpha1.CronJobList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -88,13 +89,13 @@ func (c *cronJobs) List(opts v1.ListOptions) (result *v2alpha1.CronJobList, err
 		Resource("cronjobs").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested cronJobs.
-func (c *cronJobs) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *cronJobs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -105,87 +106,90 @@ func (c *cronJobs) Watch(opts v1.ListOptions) (watch.Interface, error) {
 		Resource("cronjobs").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a cronJob and creates it.  Returns the server's representation of the cronJob, and an error, if there is any.
-func (c *cronJobs) Create(cronJob *v2alpha1.CronJob) (result *v2alpha1.CronJob, err error) {
+func (c *cronJobs) Create(ctx context.Context, cronJob *v2alpha1.CronJob, opts v1.CreateOptions) (result *v2alpha1.CronJob, err error) {
 	result = &v2alpha1.CronJob{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("cronjobs").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(cronJob).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a cronJob and updates it. Returns the server's representation of the cronJob, and an error, if there is any.
-func (c *cronJobs) Update(cronJob *v2alpha1.CronJob) (result *v2alpha1.CronJob, err error) {
+func (c *cronJobs) Update(ctx context.Context, cronJob *v2alpha1.CronJob, opts v1.UpdateOptions) (result *v2alpha1.CronJob, err error) {
 	result = &v2alpha1.CronJob{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("cronjobs").
 		Name(cronJob.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(cronJob).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // UpdateStatus was generated because the type contains a Status member.
 // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-
-func (c *cronJobs) UpdateStatus(cronJob *v2alpha1.CronJob) (result *v2alpha1.CronJob, err error) {
+func (c *cronJobs) UpdateStatus(ctx context.Context, cronJob *v2alpha1.CronJob, opts v1.UpdateOptions) (result *v2alpha1.CronJob, err error) {
 	result = &v2alpha1.CronJob{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("cronjobs").
 		Name(cronJob.Name).
 		SubResource("status").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(cronJob).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the cronJob and deletes it. Returns an error if one occurs.
-func (c *cronJobs) Delete(name string, options *v1.DeleteOptions) error {
+func (c *cronJobs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("cronjobs").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *cronJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *cronJobs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("cronjobs").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched cronJob.
-func (c *cronJobs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2alpha1.CronJob, err error) {
+func (c *cronJobs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CronJob, err error) {
 	result = &v2alpha1.CronJob{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("cronjobs").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go
index 712d3a01..6b2623b8 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1beta1
 
 import (
+	"context"
 	"time"
 
 	v1beta1 "k8s.io/api/certificates/v1beta1"
@@ -37,15 +38,15 @@ type CertificateSigningRequestsGetter interface {
 
 // CertificateSigningRequestInterface has methods to work with CertificateSigningRequest resources.
 type CertificateSigningRequestInterface interface {
-	Create(*v1beta1.CertificateSigningRequest) (*v1beta1.CertificateSigningRequest, error)
-	Update(*v1beta1.CertificateSigningRequest) (*v1beta1.CertificateSigningRequest, error)
-	UpdateStatus(*v1beta1.CertificateSigningRequest) (*v1beta1.CertificateSigningRequest, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1beta1.CertificateSigningRequest, error)
-	List(opts v1.ListOptions) (*v1beta1.CertificateSigningRequestList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error)
+	Create(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.CreateOptions) (*v1beta1.CertificateSigningRequest, error)
+	Update(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (*v1beta1.CertificateSigningRequest, error)
+	UpdateStatus(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (*v1beta1.CertificateSigningRequest, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.CertificateSigningRequest, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1beta1.CertificateSigningRequestList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error)
 	CertificateSigningRequestExpansion
 }
 
@@ -62,19 +63,19 @@ func newCertificateSigningRequests(c *CertificatesV1beta1Client) *certificateSig
 }
 
 // Get takes name of the certificateSigningRequest, and returns the corresponding certificateSigningRequest object, and an error if there is any.
-func (c *certificateSigningRequests) Get(name string, options v1.GetOptions) (result *v1beta1.CertificateSigningRequest, err error) {
+func (c *certificateSigningRequests) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CertificateSigningRequest, err error) {
 	result = &v1beta1.CertificateSigningRequest{}
 	err = c.client.Get().
 		Resource("certificatesigningrequests").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of CertificateSigningRequests that match those selectors.
-func (c *certificateSigningRequests) List(opts v1.ListOptions) (result *v1beta1.CertificateSigningRequestList, err error) {
+func (c *certificateSigningRequests) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CertificateSigningRequestList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -84,13 +85,13 @@ func (c *certificateSigningRequests) List(opts v1.ListOptions) (result *v1beta1.
 		Resource("certificatesigningrequests").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested certificateSigningRequests.
-func (c *certificateSigningRequests) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *certificateSigningRequests) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -100,81 +101,84 @@ func (c *certificateSigningRequests) Watch(opts v1.ListOptions) (watch.Interface
 		Resource("certificatesigningrequests").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a certificateSigningRequest and creates it.  Returns the server's representation of the certificateSigningRequest, and an error, if there is any.
-func (c *certificateSigningRequests) Create(certificateSigningRequest *v1beta1.CertificateSigningRequest) (result *v1beta1.CertificateSigningRequest, err error) {
+func (c *certificateSigningRequests) Create(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.CreateOptions) (result *v1beta1.CertificateSigningRequest, err error) {
 	result = &v1beta1.CertificateSigningRequest{}
 	err = c.client.Post().
 		Resource("certificatesigningrequests").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(certificateSigningRequest).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any.
-func (c *certificateSigningRequests) Update(certificateSigningRequest *v1beta1.CertificateSigningRequest) (result *v1beta1.CertificateSigningRequest, err error) {
+func (c *certificateSigningRequests) Update(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (result *v1beta1.CertificateSigningRequest, err error) {
 	result = &v1beta1.CertificateSigningRequest{}
 	err = c.client.Put().
 		Resource("certificatesigningrequests").
 		Name(certificateSigningRequest.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(certificateSigningRequest).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // UpdateStatus was generated because the type contains a Status member.
 // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-
-func (c *certificateSigningRequests) UpdateStatus(certificateSigningRequest *v1beta1.CertificateSigningRequest) (result *v1beta1.CertificateSigningRequest, err error) {
+func (c *certificateSigningRequests) UpdateStatus(ctx context.Context, certificateSigningRequest *v1beta1.CertificateSigningRequest, opts v1.UpdateOptions) (result *v1beta1.CertificateSigningRequest, err error) {
 	result = &v1beta1.CertificateSigningRequest{}
 	err = c.client.Put().
 		Resource("certificatesigningrequests").
 		Name(certificateSigningRequest.Name).
 		SubResource("status").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(certificateSigningRequest).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the certificateSigningRequest and deletes it. Returns an error if one occurs.
-func (c *certificateSigningRequests) Delete(name string, options *v1.DeleteOptions) error {
+func (c *certificateSigningRequests) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Resource("certificatesigningrequests").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *certificateSigningRequests) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *certificateSigningRequests) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Resource("certificatesigningrequests").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched certificateSigningRequest.
-func (c *certificateSigningRequests) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error) {
+func (c *certificateSigningRequests) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error) {
 	result = &v1beta1.CertificateSigningRequest{}
 	err = c.client.Patch(pt).
 		Resource("certificatesigningrequests").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go
index c63b8063..47378914 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go
@@ -17,21 +17,26 @@ limitations under the License.
 package v1beta1
 
 import (
+	"context"
+
 	certificates "k8s.io/api/certificates/v1beta1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	scheme "k8s.io/client-go/kubernetes/scheme"
 )
 
 type CertificateSigningRequestExpansion interface {
-	UpdateApproval(certificateSigningRequest *certificates.CertificateSigningRequest) (result *certificates.CertificateSigningRequest, err error)
+	UpdateApproval(ctx context.Context, certificateSigningRequest *certificates.CertificateSigningRequest, opts metav1.UpdateOptions) (result *certificates.CertificateSigningRequest, err error)
 }
 
-func (c *certificateSigningRequests) UpdateApproval(certificateSigningRequest *certificates.CertificateSigningRequest) (result *certificates.CertificateSigningRequest, err error) {
+func (c *certificateSigningRequests) UpdateApproval(ctx context.Context, certificateSigningRequest *certificates.CertificateSigningRequest, opts metav1.UpdateOptions) (result *certificates.CertificateSigningRequest, err error) {
 	result = &certificates.CertificateSigningRequest{}
 	err = c.client.Put().
 		Resource("certificatesigningrequests").
 		Name(certificateSigningRequest.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(certificateSigningRequest).
 		SubResource("approval").
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
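
The hand-written expansion methods are updated alongside the generated ones; UpdateApproval now takes a context and metav1.UpdateOptions and encodes them via VersionedParams. A sketch of approving a CSR with the new signature; the condition reason and message are illustrative, and the caller is assumed to have fetched the CSR beforehand:

package main

import (
	"context"

	certv1beta1 "k8s.io/api/certificates/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// approveCSR appends an Approved condition and submits it through the
// approval subresource using the context-aware UpdateApproval shown above.
func approveCSR(ctx context.Context, cs *kubernetes.Clientset, csr *certv1beta1.CertificateSigningRequest) (*certv1beta1.CertificateSigningRequest, error) {
	csr.Status.Conditions = append(csr.Status.Conditions, certv1beta1.CertificateSigningRequestCondition{
		Type:    certv1beta1.CertificateApproved,
		Reason:  "ManuallyApproved",
		Message: "approved via client-go example",
	})
	return cs.CertificatesV1beta1().CertificateSigningRequests().UpdateApproval(ctx, csr, metav1.UpdateOptions{})
}
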
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go
index b6cf1b64..4e8cbf9d 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1
 
 import (
+	"context"
 	"time"
 
 	v1 "k8s.io/api/coordination/v1"
@@ -37,14 +38,14 @@ type LeasesGetter interface {
 
 // LeaseInterface has methods to work with Lease resources.
 type LeaseInterface interface {
-	Create(*v1.Lease) (*v1.Lease, error)
-	Update(*v1.Lease) (*v1.Lease, error)
-	Delete(name string, options *metav1.DeleteOptions) error
-	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
-	Get(name string, options metav1.GetOptions) (*v1.Lease, error)
-	List(opts metav1.ListOptions) (*v1.LeaseList, error)
-	Watch(opts metav1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Lease, err error)
+	Create(ctx context.Context, lease *v1.Lease, opts metav1.CreateOptions) (*v1.Lease, error)
+	Update(ctx context.Context, lease *v1.Lease, opts metav1.UpdateOptions) (*v1.Lease, error)
+	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Lease, error)
+	List(ctx context.Context, opts metav1.ListOptions) (*v1.LeaseList, error)
+	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Lease, err error)
 	LeaseExpansion
 }
 
@@ -63,20 +64,20 @@ func newLeases(c *CoordinationV1Client, namespace string) *leases {
 }
 
 // Get takes name of the lease, and returns the corresponding lease object, and an error if there is any.
-func (c *leases) Get(name string, options metav1.GetOptions) (result *v1.Lease, err error) {
+func (c *leases) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Lease, err error) {
 	result = &v1.Lease{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("leases").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of Leases that match those selectors.
-func (c *leases) List(opts metav1.ListOptions) (result *v1.LeaseList, err error) {
+func (c *leases) List(ctx context.Context, opts metav1.ListOptions) (result *v1.LeaseList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -87,13 +88,13 @@ func (c *leases) List(opts metav1.ListOptions) (result *v1.LeaseList, err error)
 		Resource("leases").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested leases.
-func (c *leases) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+func (c *leases) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -104,71 +105,74 @@ func (c *leases) Watch(opts metav1.ListOptions) (watch.Interface, error) {
 		Resource("leases").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a lease and creates it.  Returns the server's representation of the lease, and an error, if there is any.
-func (c *leases) Create(lease *v1.Lease) (result *v1.Lease, err error) {
+func (c *leases) Create(ctx context.Context, lease *v1.Lease, opts metav1.CreateOptions) (result *v1.Lease, err error) {
 	result = &v1.Lease{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("leases").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(lease).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a lease and updates it. Returns the server's representation of the lease, and an error, if there is any.
-func (c *leases) Update(lease *v1.Lease) (result *v1.Lease, err error) {
+func (c *leases) Update(ctx context.Context, lease *v1.Lease, opts metav1.UpdateOptions) (result *v1.Lease, err error) {
 	result = &v1.Lease{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("leases").
 		Name(lease.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(lease).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the lease and deletes it. Returns an error if one occurs.
-func (c *leases) Delete(name string, options *metav1.DeleteOptions) error {
+func (c *leases) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("leases").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *leases) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+func (c *leases) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("leases").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched lease.
-func (c *leases) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Lease, err error) {
+func (c *leases) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Lease, err error) {
 	result = &v1.Lease{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("leases").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go
index 490d815a..c73cb0a9 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1beta1
 
 import (
+	"context"
 	"time"
 
 	v1beta1 "k8s.io/api/coordination/v1beta1"
@@ -37,14 +38,14 @@ type LeasesGetter interface {
 
 // LeaseInterface has methods to work with Lease resources.
 type LeaseInterface interface {
-	Create(*v1beta1.Lease) (*v1beta1.Lease, error)
-	Update(*v1beta1.Lease) (*v1beta1.Lease, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1beta1.Lease, error)
-	List(opts v1.ListOptions) (*v1beta1.LeaseList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Lease, err error)
+	Create(ctx context.Context, lease *v1beta1.Lease, opts v1.CreateOptions) (*v1beta1.Lease, error)
+	Update(ctx context.Context, lease *v1beta1.Lease, opts v1.UpdateOptions) (*v1beta1.Lease, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Lease, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1beta1.LeaseList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Lease, err error)
 	LeaseExpansion
 }
 
@@ -63,20 +64,20 @@ func newLeases(c *CoordinationV1beta1Client, namespace string) *leases {
 }
 
 // Get takes name of the lease, and returns the corresponding lease object, and an error if there is any.
-func (c *leases) Get(name string, options v1.GetOptions) (result *v1beta1.Lease, err error) {
+func (c *leases) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Lease, err error) {
 	result = &v1beta1.Lease{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("leases").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of Leases that match those selectors.
-func (c *leases) List(opts v1.ListOptions) (result *v1beta1.LeaseList, err error) {
+func (c *leases) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.LeaseList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -87,13 +88,13 @@ func (c *leases) List(opts v1.ListOptions) (result *v1beta1.LeaseList, err error
 		Resource("leases").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested leases.
-func (c *leases) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *leases) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -104,71 +105,74 @@ func (c *leases) Watch(opts v1.ListOptions) (watch.Interface, error) {
 		Resource("leases").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a lease and creates it.  Returns the server's representation of the lease, and an error, if there is any.
-func (c *leases) Create(lease *v1beta1.Lease) (result *v1beta1.Lease, err error) {
+func (c *leases) Create(ctx context.Context, lease *v1beta1.Lease, opts v1.CreateOptions) (result *v1beta1.Lease, err error) {
 	result = &v1beta1.Lease{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("leases").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(lease).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a lease and updates it. Returns the server's representation of the lease, and an error, if there is any.
-func (c *leases) Update(lease *v1beta1.Lease) (result *v1beta1.Lease, err error) {
+func (c *leases) Update(ctx context.Context, lease *v1beta1.Lease, opts v1.UpdateOptions) (result *v1beta1.Lease, err error) {
 	result = &v1beta1.Lease{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("leases").
 		Name(lease.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(lease).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the lease and deletes it. Returns an error if one occurs.
-func (c *leases) Delete(name string, options *v1.DeleteOptions) error {
+func (c *leases) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("leases").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *leases) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *leases) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("leases").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched lease.
-func (c *leases) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Lease, err error) {
+func (c *leases) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Lease, err error) {
 	result = &v1beta1.Lease{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("leases").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go
index 302b2fdc..faf5d19c 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1
 
 import (
+	"context"
 	"time"
 
 	v1 "k8s.io/api/core/v1"
@@ -37,14 +38,14 @@ type ComponentStatusesGetter interface {
 
 // ComponentStatusInterface has methods to work with ComponentStatus resources.
 type ComponentStatusInterface interface {
-	Create(*v1.ComponentStatus) (*v1.ComponentStatus, error)
-	Update(*v1.ComponentStatus) (*v1.ComponentStatus, error)
-	Delete(name string, options *metav1.DeleteOptions) error
-	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
-	Get(name string, options metav1.GetOptions) (*v1.ComponentStatus, error)
-	List(opts metav1.ListOptions) (*v1.ComponentStatusList, error)
-	Watch(opts metav1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ComponentStatus, err error)
+	Create(ctx context.Context, componentStatus *v1.ComponentStatus, opts metav1.CreateOptions) (*v1.ComponentStatus, error)
+	Update(ctx context.Context, componentStatus *v1.ComponentStatus, opts metav1.UpdateOptions) (*v1.ComponentStatus, error)
+	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ComponentStatus, error)
+	List(ctx context.Context, opts metav1.ListOptions) (*v1.ComponentStatusList, error)
+	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ComponentStatus, err error)
 	ComponentStatusExpansion
 }
 
@@ -61,19 +62,19 @@ func newComponentStatuses(c *CoreV1Client) *componentStatuses {
 }
 
 // Get takes name of the componentStatus, and returns the corresponding componentStatus object, and an error if there is any.
-func (c *componentStatuses) Get(name string, options metav1.GetOptions) (result *v1.ComponentStatus, err error) {
+func (c *componentStatuses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ComponentStatus, err error) {
 	result = &v1.ComponentStatus{}
 	err = c.client.Get().
 		Resource("componentstatuses").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of ComponentStatuses that match those selectors.
-func (c *componentStatuses) List(opts metav1.ListOptions) (result *v1.ComponentStatusList, err error) {
+func (c *componentStatuses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ComponentStatusList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -83,13 +84,13 @@ func (c *componentStatuses) List(opts metav1.ListOptions) (result *v1.ComponentS
 		Resource("componentstatuses").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested componentStatuses.
-func (c *componentStatuses) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+func (c *componentStatuses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -99,66 +100,69 @@ func (c *componentStatuses) Watch(opts metav1.ListOptions) (watch.Interface, err
 		Resource("componentstatuses").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a componentStatus and creates it.  Returns the server's representation of the componentStatus, and an error, if there is any.
-func (c *componentStatuses) Create(componentStatus *v1.ComponentStatus) (result *v1.ComponentStatus, err error) {
+func (c *componentStatuses) Create(ctx context.Context, componentStatus *v1.ComponentStatus, opts metav1.CreateOptions) (result *v1.ComponentStatus, err error) {
 	result = &v1.ComponentStatus{}
 	err = c.client.Post().
 		Resource("componentstatuses").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(componentStatus).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a componentStatus and updates it. Returns the server's representation of the componentStatus, and an error, if there is any.
-func (c *componentStatuses) Update(componentStatus *v1.ComponentStatus) (result *v1.ComponentStatus, err error) {
+func (c *componentStatuses) Update(ctx context.Context, componentStatus *v1.ComponentStatus, opts metav1.UpdateOptions) (result *v1.ComponentStatus, err error) {
 	result = &v1.ComponentStatus{}
 	err = c.client.Put().
 		Resource("componentstatuses").
 		Name(componentStatus.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(componentStatus).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the componentStatus and deletes it. Returns an error if one occurs.
-func (c *componentStatuses) Delete(name string, options *metav1.DeleteOptions) error {
+func (c *componentStatuses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
 	return c.client.Delete().
 		Resource("componentstatuses").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *componentStatuses) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+func (c *componentStatuses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Resource("componentstatuses").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched componentStatus.
-func (c *componentStatuses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ComponentStatus, err error) {
+func (c *componentStatuses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ComponentStatus, err error) {
 	result = &v1.ComponentStatus{}
 	err = c.client.Patch(pt).
 		Resource("componentstatuses").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go
index 18ce954a..407d25a4 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1
 
 import (
+	"context"
 	"time"
 
 	v1 "k8s.io/api/core/v1"
@@ -37,14 +38,14 @@ type ConfigMapsGetter interface {
 
 // ConfigMapInterface has methods to work with ConfigMap resources.
 type ConfigMapInterface interface {
-	Create(*v1.ConfigMap) (*v1.ConfigMap, error)
-	Update(*v1.ConfigMap) (*v1.ConfigMap, error)
-	Delete(name string, options *metav1.DeleteOptions) error
-	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
-	Get(name string, options metav1.GetOptions) (*v1.ConfigMap, error)
-	List(opts metav1.ListOptions) (*v1.ConfigMapList, error)
-	Watch(opts metav1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ConfigMap, err error)
+	Create(ctx context.Context, configMap *v1.ConfigMap, opts metav1.CreateOptions) (*v1.ConfigMap, error)
+	Update(ctx context.Context, configMap *v1.ConfigMap, opts metav1.UpdateOptions) (*v1.ConfigMap, error)
+	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ConfigMap, error)
+	List(ctx context.Context, opts metav1.ListOptions) (*v1.ConfigMapList, error)
+	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ConfigMap, err error)
 	ConfigMapExpansion
 }
 
@@ -63,20 +64,20 @@ func newConfigMaps(c *CoreV1Client, namespace string) *configMaps {
 }
 
 // Get takes name of the configMap, and returns the corresponding configMap object, and an error if there is any.
-func (c *configMaps) Get(name string, options metav1.GetOptions) (result *v1.ConfigMap, err error) {
+func (c *configMaps) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ConfigMap, err error) {
 	result = &v1.ConfigMap{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("configmaps").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of ConfigMaps that match those selectors.
-func (c *configMaps) List(opts metav1.ListOptions) (result *v1.ConfigMapList, err error) {
+func (c *configMaps) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ConfigMapList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -87,13 +88,13 @@ func (c *configMaps) List(opts metav1.ListOptions) (result *v1.ConfigMapList, er
 		Resource("configmaps").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested configMaps.
-func (c *configMaps) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+func (c *configMaps) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -104,71 +105,74 @@ func (c *configMaps) Watch(opts metav1.ListOptions) (watch.Interface, error) {
 		Resource("configmaps").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a configMap and creates it.  Returns the server's representation of the configMap, and an error, if there is any.
-func (c *configMaps) Create(configMap *v1.ConfigMap) (result *v1.ConfigMap, err error) {
+func (c *configMaps) Create(ctx context.Context, configMap *v1.ConfigMap, opts metav1.CreateOptions) (result *v1.ConfigMap, err error) {
 	result = &v1.ConfigMap{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("configmaps").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(configMap).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a configMap and updates it. Returns the server's representation of the configMap, and an error, if there is any.
-func (c *configMaps) Update(configMap *v1.ConfigMap) (result *v1.ConfigMap, err error) {
+func (c *configMaps) Update(ctx context.Context, configMap *v1.ConfigMap, opts metav1.UpdateOptions) (result *v1.ConfigMap, err error) {
 	result = &v1.ConfigMap{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("configmaps").
 		Name(configMap.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(configMap).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the configMap and deletes it. Returns an error if one occurs.
-func (c *configMaps) Delete(name string, options *metav1.DeleteOptions) error {
+func (c *configMaps) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("configmaps").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *configMaps) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+func (c *configMaps) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("configmaps").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched configMap.
-func (c *configMaps) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ConfigMap, err error) {
+func (c *configMaps) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ConfigMap, err error) {
 	result = &v1.ConfigMap{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("configmaps").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go
index 978a2a19..c36eaaa4 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1
 
 import (
+	"context"
 	"time"
 
 	v1 "k8s.io/api/core/v1"
@@ -37,14 +38,14 @@ type EndpointsGetter interface {
 
 // EndpointsInterface has methods to work with Endpoints resources.
 type EndpointsInterface interface {
-	Create(*v1.Endpoints) (*v1.Endpoints, error)
-	Update(*v1.Endpoints) (*v1.Endpoints, error)
-	Delete(name string, options *metav1.DeleteOptions) error
-	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
-	Get(name string, options metav1.GetOptions) (*v1.Endpoints, error)
-	List(opts metav1.ListOptions) (*v1.EndpointsList, error)
-	Watch(opts metav1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Endpoints, err error)
+	Create(ctx context.Context, endpoints *v1.Endpoints, opts metav1.CreateOptions) (*v1.Endpoints, error)
+	Update(ctx context.Context, endpoints *v1.Endpoints, opts metav1.UpdateOptions) (*v1.Endpoints, error)
+	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Endpoints, error)
+	List(ctx context.Context, opts metav1.ListOptions) (*v1.EndpointsList, error)
+	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Endpoints, err error)
 	EndpointsExpansion
 }
 
@@ -63,20 +64,20 @@ func newEndpoints(c *CoreV1Client, namespace string) *endpoints {
 }
 
 // Get takes name of the endpoints, and returns the corresponding endpoints object, and an error if there is any.
-func (c *endpoints) Get(name string, options metav1.GetOptions) (result *v1.Endpoints, err error) {
+func (c *endpoints) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Endpoints, err error) {
 	result = &v1.Endpoints{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("endpoints").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of Endpoints that match those selectors.
-func (c *endpoints) List(opts metav1.ListOptions) (result *v1.EndpointsList, err error) {
+func (c *endpoints) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EndpointsList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -87,13 +88,13 @@ func (c *endpoints) List(opts metav1.ListOptions) (result *v1.EndpointsList, err
 		Resource("endpoints").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested endpoints.
-func (c *endpoints) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+func (c *endpoints) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -104,71 +105,74 @@ func (c *endpoints) Watch(opts metav1.ListOptions) (watch.Interface, error) {
 		Resource("endpoints").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a endpoints and creates it.  Returns the server's representation of the endpoints, and an error, if there is any.
-func (c *endpoints) Create(endpoints *v1.Endpoints) (result *v1.Endpoints, err error) {
+func (c *endpoints) Create(ctx context.Context, endpoints *v1.Endpoints, opts metav1.CreateOptions) (result *v1.Endpoints, err error) {
 	result = &v1.Endpoints{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("endpoints").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(endpoints).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a endpoints and updates it. Returns the server's representation of the endpoints, and an error, if there is any.
-func (c *endpoints) Update(endpoints *v1.Endpoints) (result *v1.Endpoints, err error) {
+func (c *endpoints) Update(ctx context.Context, endpoints *v1.Endpoints, opts metav1.UpdateOptions) (result *v1.Endpoints, err error) {
 	result = &v1.Endpoints{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("endpoints").
 		Name(endpoints.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(endpoints).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the endpoints and deletes it. Returns an error if one occurs.
-func (c *endpoints) Delete(name string, options *metav1.DeleteOptions) error {
+func (c *endpoints) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("endpoints").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *endpoints) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+func (c *endpoints) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("endpoints").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched endpoints.
-func (c *endpoints) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Endpoints, err error) {
+func (c *endpoints) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Endpoints, err error) {
 	result = &v1.Endpoints{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("endpoints").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go
index 55cfa090..9b669920 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1
 
 import (
+	"context"
 	"time"
 
 	v1 "k8s.io/api/core/v1"
@@ -37,14 +38,14 @@ type EventsGetter interface {
 
 // EventInterface has methods to work with Event resources.
 type EventInterface interface {
-	Create(*v1.Event) (*v1.Event, error)
-	Update(*v1.Event) (*v1.Event, error)
-	Delete(name string, options *metav1.DeleteOptions) error
-	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
-	Get(name string, options metav1.GetOptions) (*v1.Event, error)
-	List(opts metav1.ListOptions) (*v1.EventList, error)
-	Watch(opts metav1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Event, err error)
+	Create(ctx context.Context, event *v1.Event, opts metav1.CreateOptions) (*v1.Event, error)
+	Update(ctx context.Context, event *v1.Event, opts metav1.UpdateOptions) (*v1.Event, error)
+	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Event, error)
+	List(ctx context.Context, opts metav1.ListOptions) (*v1.EventList, error)
+	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Event, err error)
 	EventExpansion
 }
 
@@ -63,20 +64,20 @@ func newEvents(c *CoreV1Client, namespace string) *events {
 }
 
 // Get takes name of the event, and returns the corresponding event object, and an error if there is any.
-func (c *events) Get(name string, options metav1.GetOptions) (result *v1.Event, err error) {
+func (c *events) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Event, err error) {
 	result = &v1.Event{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("events").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of Events that match those selectors.
-func (c *events) List(opts metav1.ListOptions) (result *v1.EventList, err error) {
+func (c *events) List(ctx context.Context, opts metav1.ListOptions) (result *v1.EventList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -87,13 +88,13 @@ func (c *events) List(opts metav1.ListOptions) (result *v1.EventList, err error)
 		Resource("events").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested events.
-func (c *events) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+func (c *events) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -104,71 +105,74 @@ func (c *events) Watch(opts metav1.ListOptions) (watch.Interface, error) {
 		Resource("events").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a event and creates it.  Returns the server's representation of the event, and an error, if there is any.
-func (c *events) Create(event *v1.Event) (result *v1.Event, err error) {
+func (c *events) Create(ctx context.Context, event *v1.Event, opts metav1.CreateOptions) (result *v1.Event, err error) {
 	result = &v1.Event{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("events").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(event).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any.
-func (c *events) Update(event *v1.Event) (result *v1.Event, err error) {
+func (c *events) Update(ctx context.Context, event *v1.Event, opts metav1.UpdateOptions) (result *v1.Event, err error) {
 	result = &v1.Event{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("events").
 		Name(event.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(event).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the event and deletes it. Returns an error if one occurs.
-func (c *events) Delete(name string, options *metav1.DeleteOptions) error {
+func (c *events) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("events").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *events) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+func (c *events) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("events").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched event.
-func (c *events) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Event, err error) {
+func (c *events) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Event, err error) {
 	result = &v1.Event{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("events").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go
index 5a82afa4..211cf060 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go
@@ -17,9 +17,10 @@ limitations under the License.
 package v1
 
 import (
+	"context"
 	"fmt"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -54,7 +55,7 @@ func (e *events) CreateWithEventNamespace(event *v1.Event) (*v1.Event, error) {
 		NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0).
 		Resource("events").
 		Body(event).
-		Do().
+		Do(context.TODO()).
 		Into(result)
 	return result, err
 }
@@ -71,7 +72,7 @@ func (e *events) UpdateWithEventNamespace(event *v1.Event) (*v1.Event, error) {
 		Resource("events").
 		Name(event.Name).
 		Body(event).
-		Do().
+		Do(context.TODO()).
 		Into(result)
 	return result, err
 }
@@ -91,7 +92,7 @@ func (e *events) PatchWithEventNamespace(incompleteEvent *v1.Event, data []byte)
 		Resource("events").
 		Name(incompleteEvent.Name).
 		Body(data).
-		Do().
+		Do(context.TODO()).
 		Into(result)
 	return result, err
 }
@@ -118,7 +119,7 @@ func (e *events) Search(scheme *runtime.Scheme, objOrRef runtime.Object) (*v1.Ev
 		refUID = &stringRefUID
 	}
 	fieldSelector := e.GetFieldSelector(&ref.Name, &ref.Namespace, refKind, refUID)
-	return e.List(metav1.ListOptions{FieldSelector: fieldSelector.String()})
+	return e.List(context.TODO(), metav1.ListOptions{FieldSelector: fieldSelector.String()})
 }
 
 // Returns the appropriate field selector based on the API version being used to communicate with the server.
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/generated_expansion.go
index 6e8591b1..2cb81aa4 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/generated_expansion.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/generated_expansion.go
@@ -37,3 +37,5 @@ type ReplicationControllerExpansion interface{}
 type ResourceQuotaExpansion interface{}
 
 type SecretExpansion interface{}
+
+type ServiceAccountExpansion interface{}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go
index 2eeae11a..7031cd77 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1
 
 import (
+	"context"
 	"time"
 
 	v1 "k8s.io/api/core/v1"
@@ -37,14 +38,14 @@ type LimitRangesGetter interface {
 
 // LimitRangeInterface has methods to work with LimitRange resources.
 type LimitRangeInterface interface {
-	Create(*v1.LimitRange) (*v1.LimitRange, error)
-	Update(*v1.LimitRange) (*v1.LimitRange, error)
-	Delete(name string, options *metav1.DeleteOptions) error
-	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
-	Get(name string, options metav1.GetOptions) (*v1.LimitRange, error)
-	List(opts metav1.ListOptions) (*v1.LimitRangeList, error)
-	Watch(opts metav1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.LimitRange, err error)
+	Create(ctx context.Context, limitRange *v1.LimitRange, opts metav1.CreateOptions) (*v1.LimitRange, error)
+	Update(ctx context.Context, limitRange *v1.LimitRange, opts metav1.UpdateOptions) (*v1.LimitRange, error)
+	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.LimitRange, error)
+	List(ctx context.Context, opts metav1.ListOptions) (*v1.LimitRangeList, error)
+	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.LimitRange, err error)
 	LimitRangeExpansion
 }
 
@@ -63,20 +64,20 @@ func newLimitRanges(c *CoreV1Client, namespace string) *limitRanges {
 }
 
 // Get takes name of the limitRange, and returns the corresponding limitRange object, and an error if there is any.
-func (c *limitRanges) Get(name string, options metav1.GetOptions) (result *v1.LimitRange, err error) {
+func (c *limitRanges) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.LimitRange, err error) {
 	result = &v1.LimitRange{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("limitranges").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of LimitRanges that match those selectors.
-func (c *limitRanges) List(opts metav1.ListOptions) (result *v1.LimitRangeList, err error) {
+func (c *limitRanges) List(ctx context.Context, opts metav1.ListOptions) (result *v1.LimitRangeList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -87,13 +88,13 @@ func (c *limitRanges) List(opts metav1.ListOptions) (result *v1.LimitRangeList,
 		Resource("limitranges").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested limitRanges.
-func (c *limitRanges) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+func (c *limitRanges) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -104,71 +105,74 @@ func (c *limitRanges) Watch(opts metav1.ListOptions) (watch.Interface, error) {
 		Resource("limitranges").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a limitRange and creates it.  Returns the server's representation of the limitRange, and an error, if there is any.
-func (c *limitRanges) Create(limitRange *v1.LimitRange) (result *v1.LimitRange, err error) {
+func (c *limitRanges) Create(ctx context.Context, limitRange *v1.LimitRange, opts metav1.CreateOptions) (result *v1.LimitRange, err error) {
 	result = &v1.LimitRange{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("limitranges").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(limitRange).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a limitRange and updates it. Returns the server's representation of the limitRange, and an error, if there is any.
-func (c *limitRanges) Update(limitRange *v1.LimitRange) (result *v1.LimitRange, err error) {
+func (c *limitRanges) Update(ctx context.Context, limitRange *v1.LimitRange, opts metav1.UpdateOptions) (result *v1.LimitRange, err error) {
 	result = &v1.LimitRange{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("limitranges").
 		Name(limitRange.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(limitRange).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the limitRange and deletes it. Returns an error if one occurs.
-func (c *limitRanges) Delete(name string, options *metav1.DeleteOptions) error {
+func (c *limitRanges) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("limitranges").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *limitRanges) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+func (c *limitRanges) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("limitranges").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched limitRange.
-func (c *limitRanges) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.LimitRange, err error) {
+func (c *limitRanges) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.LimitRange, err error) {
 	result = &v1.LimitRange{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("limitranges").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go
index 8a81fe85..55b03d65 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1
 
 import (
+	"context"
 	"time"
 
 	v1 "k8s.io/api/core/v1"
@@ -37,14 +38,14 @@ type NamespacesGetter interface {
 
 // NamespaceInterface has methods to work with Namespace resources.
 type NamespaceInterface interface {
-	Create(*v1.Namespace) (*v1.Namespace, error)
-	Update(*v1.Namespace) (*v1.Namespace, error)
-	UpdateStatus(*v1.Namespace) (*v1.Namespace, error)
-	Delete(name string, options *metav1.DeleteOptions) error
-	Get(name string, options metav1.GetOptions) (*v1.Namespace, error)
-	List(opts metav1.ListOptions) (*v1.NamespaceList, error)
-	Watch(opts metav1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Namespace, err error)
+	Create(ctx context.Context, namespace *v1.Namespace, opts metav1.CreateOptions) (*v1.Namespace, error)
+	Update(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (*v1.Namespace, error)
+	UpdateStatus(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (*v1.Namespace, error)
+	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Namespace, error)
+	List(ctx context.Context, opts metav1.ListOptions) (*v1.NamespaceList, error)
+	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Namespace, err error)
 	NamespaceExpansion
 }
 
@@ -61,19 +62,19 @@ func newNamespaces(c *CoreV1Client) *namespaces {
 }
 
 // Get takes name of the namespace, and returns the corresponding namespace object, and an error if there is any.
-func (c *namespaces) Get(name string, options metav1.GetOptions) (result *v1.Namespace, err error) {
+func (c *namespaces) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Namespace, err error) {
 	result = &v1.Namespace{}
 	err = c.client.Get().
 		Resource("namespaces").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of Namespaces that match those selectors.
-func (c *namespaces) List(opts metav1.ListOptions) (result *v1.NamespaceList, err error) {
+func (c *namespaces) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NamespaceList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -83,13 +84,13 @@ func (c *namespaces) List(opts metav1.ListOptions) (result *v1.NamespaceList, er
 		Resource("namespaces").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested namespaces.
-func (c *namespaces) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+func (c *namespaces) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -99,66 +100,69 @@ func (c *namespaces) Watch(opts metav1.ListOptions) (watch.Interface, error) {
 		Resource("namespaces").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a namespace and creates it.  Returns the server's representation of the namespace, and an error, if there is any.
-func (c *namespaces) Create(namespace *v1.Namespace) (result *v1.Namespace, err error) {
+func (c *namespaces) Create(ctx context.Context, namespace *v1.Namespace, opts metav1.CreateOptions) (result *v1.Namespace, err error) {
 	result = &v1.Namespace{}
 	err = c.client.Post().
 		Resource("namespaces").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(namespace).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a namespace and updates it. Returns the server's representation of the namespace, and an error, if there is any.
-func (c *namespaces) Update(namespace *v1.Namespace) (result *v1.Namespace, err error) {
+func (c *namespaces) Update(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (result *v1.Namespace, err error) {
 	result = &v1.Namespace{}
 	err = c.client.Put().
 		Resource("namespaces").
 		Name(namespace.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(namespace).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // UpdateStatus was generated because the type contains a Status member.
 // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-
-func (c *namespaces) UpdateStatus(namespace *v1.Namespace) (result *v1.Namespace, err error) {
+func (c *namespaces) UpdateStatus(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (result *v1.Namespace, err error) {
 	result = &v1.Namespace{}
 	err = c.client.Put().
 		Resource("namespaces").
 		Name(namespace.Name).
 		SubResource("status").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(namespace).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the namespace and deletes it. Returns an error if one occurs.
-func (c *namespaces) Delete(name string, options *metav1.DeleteOptions) error {
+func (c *namespaces) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
 	return c.client.Delete().
 		Resource("namespaces").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched namespace.
-func (c *namespaces) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Namespace, err error) {
+func (c *namespaces) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Namespace, err error) {
 	result = &v1.Namespace{}
 	err = c.client.Patch(pt).
 		Resource("namespaces").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go
index 17effe29..be1116db 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go
@@ -16,16 +16,22 @@ limitations under the License.
 
 package v1
 
-import "k8s.io/api/core/v1"
+import (
+	"context"
+
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+)
 
 // The NamespaceExpansion interface allows manually adding extra methods to the NamespaceInterface.
 type NamespaceExpansion interface {
-	Finalize(item *v1.Namespace) (*v1.Namespace, error)
+	Finalize(ctx context.Context, item *v1.Namespace, opts metav1.UpdateOptions) (*v1.Namespace, error)
 }
 
 // Finalize takes the representation of a namespace to update.  Returns the server's representation of the namespace, and an error, if it occurs.
-func (c *namespaces) Finalize(namespace *v1.Namespace) (result *v1.Namespace, err error) {
+func (c *namespaces) Finalize(ctx context.Context, namespace *v1.Namespace, opts metav1.UpdateOptions) (result *v1.Namespace, err error) {
 	result = &v1.Namespace{}
-	err = c.client.Put().Resource("namespaces").Name(namespace.Name).SubResource("finalize").Body(namespace).Do().Into(result)
+	err = c.client.Put().Resource("namespaces").Name(namespace.Name).VersionedParams(&opts, scheme.ParameterCodec).SubResource("finalize").Body(namespace).Do(ctx).Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go
index d19fab89..6176808f 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1
 
 import (
+	"context"
 	"time"
 
 	v1 "k8s.io/api/core/v1"
@@ -37,15 +38,15 @@ type NodesGetter interface {
 
 // NodeInterface has methods to work with Node resources.
 type NodeInterface interface {
-	Create(*v1.Node) (*v1.Node, error)
-	Update(*v1.Node) (*v1.Node, error)
-	UpdateStatus(*v1.Node) (*v1.Node, error)
-	Delete(name string, options *metav1.DeleteOptions) error
-	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
-	Get(name string, options metav1.GetOptions) (*v1.Node, error)
-	List(opts metav1.ListOptions) (*v1.NodeList, error)
-	Watch(opts metav1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Node, err error)
+	Create(ctx context.Context, node *v1.Node, opts metav1.CreateOptions) (*v1.Node, error)
+	Update(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (*v1.Node, error)
+	UpdateStatus(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (*v1.Node, error)
+	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Node, error)
+	List(ctx context.Context, opts metav1.ListOptions) (*v1.NodeList, error)
+	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Node, err error)
 	NodeExpansion
 }
 
@@ -62,19 +63,19 @@ func newNodes(c *CoreV1Client) *nodes {
 }
 
 // Get takes name of the node, and returns the corresponding node object, and an error if there is any.
-func (c *nodes) Get(name string, options metav1.GetOptions) (result *v1.Node, err error) {
+func (c *nodes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Node, err error) {
 	result = &v1.Node{}
 	err = c.client.Get().
 		Resource("nodes").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of Nodes that match those selectors.
-func (c *nodes) List(opts metav1.ListOptions) (result *v1.NodeList, err error) {
+func (c *nodes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NodeList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -84,13 +85,13 @@ func (c *nodes) List(opts metav1.ListOptions) (result *v1.NodeList, err error) {
 		Resource("nodes").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested nodes.
-func (c *nodes) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+func (c *nodes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -100,81 +101,84 @@ func (c *nodes) Watch(opts metav1.ListOptions) (watch.Interface, error) {
 		Resource("nodes").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a node and creates it.  Returns the server's representation of the node, and an error, if there is any.
-func (c *nodes) Create(node *v1.Node) (result *v1.Node, err error) {
+func (c *nodes) Create(ctx context.Context, node *v1.Node, opts metav1.CreateOptions) (result *v1.Node, err error) {
 	result = &v1.Node{}
 	err = c.client.Post().
 		Resource("nodes").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(node).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a node and updates it. Returns the server's representation of the node, and an error, if there is any.
-func (c *nodes) Update(node *v1.Node) (result *v1.Node, err error) {
+func (c *nodes) Update(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (result *v1.Node, err error) {
 	result = &v1.Node{}
 	err = c.client.Put().
 		Resource("nodes").
 		Name(node.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(node).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // UpdateStatus was generated because the type contains a Status member.
 // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-
-func (c *nodes) UpdateStatus(node *v1.Node) (result *v1.Node, err error) {
+func (c *nodes) UpdateStatus(ctx context.Context, node *v1.Node, opts metav1.UpdateOptions) (result *v1.Node, err error) {
 	result = &v1.Node{}
 	err = c.client.Put().
 		Resource("nodes").
 		Name(node.Name).
 		SubResource("status").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(node).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the node and deletes it. Returns an error if one occurs.
-func (c *nodes) Delete(name string, options *metav1.DeleteOptions) error {
+func (c *nodes) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
 	return c.client.Delete().
 		Resource("nodes").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *nodes) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+func (c *nodes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Resource("nodes").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched node.
-func (c *nodes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Node, err error) {
+func (c *nodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Node, err error) {
 	result = &v1.Node{}
 	err = c.client.Patch(pt).
 		Resource("nodes").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go
index 5db29c3f..bdf7bfed 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go
@@ -17,7 +17,9 @@ limitations under the License.
 package v1
 
 import (
-	"k8s.io/api/core/v1"
+	"context"
+
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
 )
 
@@ -25,19 +27,19 @@ import (
 type NodeExpansion interface {
 	// PatchStatus modifies the status of an existing node. It returns the copy
 	// of the node that the server returns, or an error.
-	PatchStatus(nodeName string, data []byte) (*v1.Node, error)
+	PatchStatus(ctx context.Context, nodeName string, data []byte) (*v1.Node, error)
 }
 
 // PatchStatus modifies the status of an existing node. It returns the copy of
 // the node that the server returns, or an error.
-func (c *nodes) PatchStatus(nodeName string, data []byte) (*v1.Node, error) {
+func (c *nodes) PatchStatus(ctx context.Context, nodeName string, data []byte) (*v1.Node, error) {
 	result := &v1.Node{}
 	err := c.client.Patch(types.StrategicMergePatchType).
 		Resource("nodes").
 		Name(nodeName).
 		SubResource("status").
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return result, err
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go
index 74514825..1eb9db63 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1
 
 import (
+	"context"
 	"time"
 
 	v1 "k8s.io/api/core/v1"
@@ -37,15 +38,15 @@ type PersistentVolumesGetter interface {
 
 // PersistentVolumeInterface has methods to work with PersistentVolume resources.
 type PersistentVolumeInterface interface {
-	Create(*v1.PersistentVolume) (*v1.PersistentVolume, error)
-	Update(*v1.PersistentVolume) (*v1.PersistentVolume, error)
-	UpdateStatus(*v1.PersistentVolume) (*v1.PersistentVolume, error)
-	Delete(name string, options *metav1.DeleteOptions) error
-	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
-	Get(name string, options metav1.GetOptions) (*v1.PersistentVolume, error)
-	List(opts metav1.ListOptions) (*v1.PersistentVolumeList, error)
-	Watch(opts metav1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PersistentVolume, err error)
+	Create(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.CreateOptions) (*v1.PersistentVolume, error)
+	Update(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (*v1.PersistentVolume, error)
+	UpdateStatus(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (*v1.PersistentVolume, error)
+	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.PersistentVolume, error)
+	List(ctx context.Context, opts metav1.ListOptions) (*v1.PersistentVolumeList, error)
+	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PersistentVolume, err error)
 	PersistentVolumeExpansion
 }
 
@@ -62,19 +63,19 @@ func newPersistentVolumes(c *CoreV1Client) *persistentVolumes {
 }
 
 // Get takes name of the persistentVolume, and returns the corresponding persistentVolume object, and an error if there is any.
-func (c *persistentVolumes) Get(name string, options metav1.GetOptions) (result *v1.PersistentVolume, err error) {
+func (c *persistentVolumes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PersistentVolume, err error) {
 	result = &v1.PersistentVolume{}
 	err = c.client.Get().
 		Resource("persistentvolumes").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of PersistentVolumes that match those selectors.
-func (c *persistentVolumes) List(opts metav1.ListOptions) (result *v1.PersistentVolumeList, err error) {
+func (c *persistentVolumes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PersistentVolumeList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -84,13 +85,13 @@ func (c *persistentVolumes) List(opts metav1.ListOptions) (result *v1.Persistent
 		Resource("persistentvolumes").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested persistentVolumes.
-func (c *persistentVolumes) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+func (c *persistentVolumes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -100,81 +101,84 @@ func (c *persistentVolumes) Watch(opts metav1.ListOptions) (watch.Interface, err
 		Resource("persistentvolumes").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a persistentVolume and creates it.  Returns the server's representation of the persistentVolume, and an error, if there is any.
-func (c *persistentVolumes) Create(persistentVolume *v1.PersistentVolume) (result *v1.PersistentVolume, err error) {
+func (c *persistentVolumes) Create(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.CreateOptions) (result *v1.PersistentVolume, err error) {
 	result = &v1.PersistentVolume{}
 	err = c.client.Post().
 		Resource("persistentvolumes").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(persistentVolume).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a persistentVolume and updates it. Returns the server's representation of the persistentVolume, and an error, if there is any.
-func (c *persistentVolumes) Update(persistentVolume *v1.PersistentVolume) (result *v1.PersistentVolume, err error) {
+func (c *persistentVolumes) Update(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (result *v1.PersistentVolume, err error) {
 	result = &v1.PersistentVolume{}
 	err = c.client.Put().
 		Resource("persistentvolumes").
 		Name(persistentVolume.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(persistentVolume).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // UpdateStatus was generated because the type contains a Status member.
 // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-
-func (c *persistentVolumes) UpdateStatus(persistentVolume *v1.PersistentVolume) (result *v1.PersistentVolume, err error) {
+func (c *persistentVolumes) UpdateStatus(ctx context.Context, persistentVolume *v1.PersistentVolume, opts metav1.UpdateOptions) (result *v1.PersistentVolume, err error) {
 	result = &v1.PersistentVolume{}
 	err = c.client.Put().
 		Resource("persistentvolumes").
 		Name(persistentVolume.Name).
 		SubResource("status").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(persistentVolume).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the persistentVolume and deletes it. Returns an error if one occurs.
-func (c *persistentVolumes) Delete(name string, options *metav1.DeleteOptions) error {
+func (c *persistentVolumes) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
 	return c.client.Delete().
 		Resource("persistentvolumes").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *persistentVolumes) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+func (c *persistentVolumes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Resource("persistentvolumes").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched persistentVolume.
-func (c *persistentVolumes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PersistentVolume, err error) {
+func (c *persistentVolumes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PersistentVolume, err error) {
 	result = &v1.PersistentVolume{}
 	err = c.client.Patch(pt).
 		Resource("persistentvolumes").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go
index 410ab37d..f4e205f4 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1
 
 import (
+	"context"
 	"time"
 
 	v1 "k8s.io/api/core/v1"
@@ -37,15 +38,15 @@ type PersistentVolumeClaimsGetter interface {
 
 // PersistentVolumeClaimInterface has methods to work with PersistentVolumeClaim resources.
 type PersistentVolumeClaimInterface interface {
-	Create(*v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error)
-	Update(*v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error)
-	UpdateStatus(*v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error)
-	Delete(name string, options *metav1.DeleteOptions) error
-	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
-	Get(name string, options metav1.GetOptions) (*v1.PersistentVolumeClaim, error)
-	List(opts metav1.ListOptions) (*v1.PersistentVolumeClaimList, error)
-	Watch(opts metav1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PersistentVolumeClaim, err error)
+	Create(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.CreateOptions) (*v1.PersistentVolumeClaim, error)
+	Update(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (*v1.PersistentVolumeClaim, error)
+	UpdateStatus(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (*v1.PersistentVolumeClaim, error)
+	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.PersistentVolumeClaim, error)
+	List(ctx context.Context, opts metav1.ListOptions) (*v1.PersistentVolumeClaimList, error)
+	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PersistentVolumeClaim, err error)
 	PersistentVolumeClaimExpansion
 }
 
@@ -64,20 +65,20 @@ func newPersistentVolumeClaims(c *CoreV1Client, namespace string) *persistentVol
 }
 
 // Get takes name of the persistentVolumeClaim, and returns the corresponding persistentVolumeClaim object, and an error if there is any.
-func (c *persistentVolumeClaims) Get(name string, options metav1.GetOptions) (result *v1.PersistentVolumeClaim, err error) {
+func (c *persistentVolumeClaims) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PersistentVolumeClaim, err error) {
 	result = &v1.PersistentVolumeClaim{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("persistentvolumeclaims").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of PersistentVolumeClaims that match those selectors.
-func (c *persistentVolumeClaims) List(opts metav1.ListOptions) (result *v1.PersistentVolumeClaimList, err error) {
+func (c *persistentVolumeClaims) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PersistentVolumeClaimList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -88,13 +89,13 @@ func (c *persistentVolumeClaims) List(opts metav1.ListOptions) (result *v1.Persi
 		Resource("persistentvolumeclaims").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested persistentVolumeClaims.
-func (c *persistentVolumeClaims) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+func (c *persistentVolumeClaims) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -105,87 +106,90 @@ func (c *persistentVolumeClaims) Watch(opts metav1.ListOptions) (watch.Interface
 		Resource("persistentvolumeclaims").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a persistentVolumeClaim and creates it.  Returns the server's representation of the persistentVolumeClaim, and an error, if there is any.
-func (c *persistentVolumeClaims) Create(persistentVolumeClaim *v1.PersistentVolumeClaim) (result *v1.PersistentVolumeClaim, err error) {
+func (c *persistentVolumeClaims) Create(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.CreateOptions) (result *v1.PersistentVolumeClaim, err error) {
 	result = &v1.PersistentVolumeClaim{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("persistentvolumeclaims").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(persistentVolumeClaim).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a persistentVolumeClaim and updates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any.
-func (c *persistentVolumeClaims) Update(persistentVolumeClaim *v1.PersistentVolumeClaim) (result *v1.PersistentVolumeClaim, err error) {
+func (c *persistentVolumeClaims) Update(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (result *v1.PersistentVolumeClaim, err error) {
 	result = &v1.PersistentVolumeClaim{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("persistentvolumeclaims").
 		Name(persistentVolumeClaim.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(persistentVolumeClaim).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // UpdateStatus was generated because the type contains a Status member.
 // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-
-func (c *persistentVolumeClaims) UpdateStatus(persistentVolumeClaim *v1.PersistentVolumeClaim) (result *v1.PersistentVolumeClaim, err error) {
+func (c *persistentVolumeClaims) UpdateStatus(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.UpdateOptions) (result *v1.PersistentVolumeClaim, err error) {
 	result = &v1.PersistentVolumeClaim{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("persistentvolumeclaims").
 		Name(persistentVolumeClaim.Name).
 		SubResource("status").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(persistentVolumeClaim).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the persistentVolumeClaim and deletes it. Returns an error if one occurs.
-func (c *persistentVolumeClaims) Delete(name string, options *metav1.DeleteOptions) error {
+func (c *persistentVolumeClaims) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("persistentvolumeclaims").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *persistentVolumeClaims) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+func (c *persistentVolumeClaims) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("persistentvolumeclaims").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched persistentVolumeClaim.
-func (c *persistentVolumeClaims) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PersistentVolumeClaim, err error) {
+func (c *persistentVolumeClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PersistentVolumeClaim, err error) {
 	result = &v1.PersistentVolumeClaim{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("persistentvolumeclaims").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go
index feacd307..36092ab6 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1
 
 import (
+	"context"
 	"time"
 
 	v1 "k8s.io/api/core/v1"
@@ -37,17 +38,17 @@ type PodsGetter interface {
 
 // PodInterface has methods to work with Pod resources.
 type PodInterface interface {
-	Create(*v1.Pod) (*v1.Pod, error)
-	Update(*v1.Pod) (*v1.Pod, error)
-	UpdateStatus(*v1.Pod) (*v1.Pod, error)
-	Delete(name string, options *metav1.DeleteOptions) error
-	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
-	Get(name string, options metav1.GetOptions) (*v1.Pod, error)
-	List(opts metav1.ListOptions) (*v1.PodList, error)
-	Watch(opts metav1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Pod, err error)
-	GetEphemeralContainers(podName string, options metav1.GetOptions) (*v1.EphemeralContainers, error)
-	UpdateEphemeralContainers(podName string, ephemeralContainers *v1.EphemeralContainers) (*v1.EphemeralContainers, error)
+	Create(ctx context.Context, pod *v1.Pod, opts metav1.CreateOptions) (*v1.Pod, error)
+	Update(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (*v1.Pod, error)
+	UpdateStatus(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (*v1.Pod, error)
+	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Pod, error)
+	List(ctx context.Context, opts metav1.ListOptions) (*v1.PodList, error)
+	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Pod, err error)
+	GetEphemeralContainers(ctx context.Context, podName string, options metav1.GetOptions) (*v1.EphemeralContainers, error)
+	UpdateEphemeralContainers(ctx context.Context, podName string, ephemeralContainers *v1.EphemeralContainers, opts metav1.UpdateOptions) (*v1.EphemeralContainers, error)
 
 	PodExpansion
 }
@@ -67,20 +68,20 @@ func newPods(c *CoreV1Client, namespace string) *pods {
 }
 
 // Get takes name of the pod, and returns the corresponding pod object, and an error if there is any.
-func (c *pods) Get(name string, options metav1.GetOptions) (result *v1.Pod, err error) {
+func (c *pods) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Pod, err error) {
 	result = &v1.Pod{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("pods").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of Pods that match those selectors.
-func (c *pods) List(opts metav1.ListOptions) (result *v1.PodList, err error) {
+func (c *pods) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PodList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -91,13 +92,13 @@ func (c *pods) List(opts metav1.ListOptions) (result *v1.PodList, err error) {
 		Resource("pods").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested pods.
-func (c *pods) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+func (c *pods) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -108,93 +109,96 @@ func (c *pods) Watch(opts metav1.ListOptions) (watch.Interface, error) {
 		Resource("pods").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a pod and creates it.  Returns the server's representation of the pod, and an error, if there is any.
-func (c *pods) Create(pod *v1.Pod) (result *v1.Pod, err error) {
+func (c *pods) Create(ctx context.Context, pod *v1.Pod, opts metav1.CreateOptions) (result *v1.Pod, err error) {
 	result = &v1.Pod{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("pods").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(pod).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any.
-func (c *pods) Update(pod *v1.Pod) (result *v1.Pod, err error) {
+func (c *pods) Update(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) {
 	result = &v1.Pod{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("pods").
 		Name(pod.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(pod).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // UpdateStatus was generated because the type contains a Status member.
 // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-
-func (c *pods) UpdateStatus(pod *v1.Pod) (result *v1.Pod, err error) {
+func (c *pods) UpdateStatus(ctx context.Context, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) {
 	result = &v1.Pod{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("pods").
 		Name(pod.Name).
 		SubResource("status").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(pod).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the pod and deletes it. Returns an error if one occurs.
-func (c *pods) Delete(name string, options *metav1.DeleteOptions) error {
+func (c *pods) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("pods").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *pods) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+func (c *pods) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("pods").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched pod.
-func (c *pods) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Pod, err error) {
+func (c *pods) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Pod, err error) {
 	result = &v1.Pod{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("pods").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // GetEphemeralContainers takes name of the pod, and returns the corresponding v1.EphemeralContainers object, and an error if there is any.
-func (c *pods) GetEphemeralContainers(podName string, options metav1.GetOptions) (result *v1.EphemeralContainers, err error) {
+func (c *pods) GetEphemeralContainers(ctx context.Context, podName string, options metav1.GetOptions) (result *v1.EphemeralContainers, err error) {
 	result = &v1.EphemeralContainers{}
 	err = c.client.Get().
 		Namespace(c.ns).
@@ -202,21 +206,22 @@ func (c *pods) GetEphemeralContainers(podName string, options metav1.GetOptions)
 		Name(podName).
 		SubResource("ephemeralcontainers").
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // UpdateEphemeralContainers takes the top resource name and the representation of a ephemeralContainers and updates it. Returns the server's representation of the ephemeralContainers, and an error, if there is any.
-func (c *pods) UpdateEphemeralContainers(podName string, ephemeralContainers *v1.EphemeralContainers) (result *v1.EphemeralContainers, err error) {
+func (c *pods) UpdateEphemeralContainers(ctx context.Context, podName string, ephemeralContainers *v1.EphemeralContainers, opts metav1.UpdateOptions) (result *v1.EphemeralContainers, err error) {
 	result = &v1.EphemeralContainers{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("pods").
 		Name(podName).
 		SubResource("ephemeralcontainers").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(ephemeralContainers).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go
index ed876be8..8710a2c0 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go
@@ -17,26 +17,29 @@ limitations under the License.
 package v1
 
 import (
-	"k8s.io/api/core/v1"
+	"context"
+
+	v1 "k8s.io/api/core/v1"
 	policy "k8s.io/api/policy/v1beta1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes/scheme"
 	restclient "k8s.io/client-go/rest"
 )
 
 // The PodExpansion interface allows manually adding extra methods to the PodInterface.
 type PodExpansion interface {
-	Bind(binding *v1.Binding) error
-	Evict(eviction *policy.Eviction) error
+	Bind(ctx context.Context, binding *v1.Binding, opts metav1.CreateOptions) error
+	Evict(ctx context.Context, eviction *policy.Eviction) error
 	GetLogs(name string, opts *v1.PodLogOptions) *restclient.Request
 }
 
 // Bind applies the provided binding to the named pod in the current namespace (binding.Namespace is ignored).
-func (c *pods) Bind(binding *v1.Binding) error {
-	return c.client.Post().Namespace(c.ns).Resource("pods").Name(binding.Name).SubResource("binding").Body(binding).Do().Error()
+func (c *pods) Bind(ctx context.Context, binding *v1.Binding, opts metav1.CreateOptions) error {
+	return c.client.Post().Namespace(c.ns).Resource("pods").Name(binding.Name).VersionedParams(&opts, scheme.ParameterCodec).SubResource("binding").Body(binding).Do(ctx).Error()
 }
 
-func (c *pods) Evict(eviction *policy.Eviction) error {
-	return c.client.Post().Namespace(c.ns).Resource("pods").Name(eviction.Name).SubResource("eviction").Body(eviction).Do().Error()
+func (c *pods) Evict(ctx context.Context, eviction *policy.Eviction) error {
+	return c.client.Post().Namespace(c.ns).Resource("pods").Name(eviction.Name).SubResource("eviction").Body(eviction).Do(ctx).Error()
 }
 
 // Get constructs a request for getting the logs for a pod
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go
index 84d7c980..012d3b52 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1
 
 import (
+	"context"
 	"time"
 
 	v1 "k8s.io/api/core/v1"
@@ -37,14 +38,14 @@ type PodTemplatesGetter interface {
 
 // PodTemplateInterface has methods to work with PodTemplate resources.
 type PodTemplateInterface interface {
-	Create(*v1.PodTemplate) (*v1.PodTemplate, error)
-	Update(*v1.PodTemplate) (*v1.PodTemplate, error)
-	Delete(name string, options *metav1.DeleteOptions) error
-	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
-	Get(name string, options metav1.GetOptions) (*v1.PodTemplate, error)
-	List(opts metav1.ListOptions) (*v1.PodTemplateList, error)
-	Watch(opts metav1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PodTemplate, err error)
+	Create(ctx context.Context, podTemplate *v1.PodTemplate, opts metav1.CreateOptions) (*v1.PodTemplate, error)
+	Update(ctx context.Context, podTemplate *v1.PodTemplate, opts metav1.UpdateOptions) (*v1.PodTemplate, error)
+	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.PodTemplate, error)
+	List(ctx context.Context, opts metav1.ListOptions) (*v1.PodTemplateList, error)
+	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PodTemplate, err error)
 	PodTemplateExpansion
 }
 
@@ -63,20 +64,20 @@ func newPodTemplates(c *CoreV1Client, namespace string) *podTemplates {
 }
 
 // Get takes name of the podTemplate, and returns the corresponding podTemplate object, and an error if there is any.
-func (c *podTemplates) Get(name string, options metav1.GetOptions) (result *v1.PodTemplate, err error) {
+func (c *podTemplates) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PodTemplate, err error) {
 	result = &v1.PodTemplate{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("podtemplates").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of PodTemplates that match those selectors.
-func (c *podTemplates) List(opts metav1.ListOptions) (result *v1.PodTemplateList, err error) {
+func (c *podTemplates) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PodTemplateList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -87,13 +88,13 @@ func (c *podTemplates) List(opts metav1.ListOptions) (result *v1.PodTemplateList
 		Resource("podtemplates").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested podTemplates.
-func (c *podTemplates) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+func (c *podTemplates) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -104,71 +105,74 @@ func (c *podTemplates) Watch(opts metav1.ListOptions) (watch.Interface, error) {
 		Resource("podtemplates").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a podTemplate and creates it.  Returns the server's representation of the podTemplate, and an error, if there is any.
-func (c *podTemplates) Create(podTemplate *v1.PodTemplate) (result *v1.PodTemplate, err error) {
+func (c *podTemplates) Create(ctx context.Context, podTemplate *v1.PodTemplate, opts metav1.CreateOptions) (result *v1.PodTemplate, err error) {
 	result = &v1.PodTemplate{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("podtemplates").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(podTemplate).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a podTemplate and updates it. Returns the server's representation of the podTemplate, and an error, if there is any.
-func (c *podTemplates) Update(podTemplate *v1.PodTemplate) (result *v1.PodTemplate, err error) {
+func (c *podTemplates) Update(ctx context.Context, podTemplate *v1.PodTemplate, opts metav1.UpdateOptions) (result *v1.PodTemplate, err error) {
 	result = &v1.PodTemplate{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("podtemplates").
 		Name(podTemplate.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(podTemplate).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the podTemplate and deletes it. Returns an error if one occurs.
-func (c *podTemplates) Delete(name string, options *metav1.DeleteOptions) error {
+func (c *podTemplates) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("podtemplates").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *podTemplates) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+func (c *podTemplates) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("podtemplates").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched podTemplate.
-func (c *podTemplates) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PodTemplate, err error) {
+func (c *podTemplates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PodTemplate, err error) {
 	result = &v1.PodTemplate{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("podtemplates").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go
index dd3182db..8e9ccd59 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1
 
 import (
+	"context"
 	"time"
 
 	autoscalingv1 "k8s.io/api/autoscaling/v1"
@@ -38,17 +39,17 @@ type ReplicationControllersGetter interface {
 
 // ReplicationControllerInterface has methods to work with ReplicationController resources.
 type ReplicationControllerInterface interface {
-	Create(*v1.ReplicationController) (*v1.ReplicationController, error)
-	Update(*v1.ReplicationController) (*v1.ReplicationController, error)
-	UpdateStatus(*v1.ReplicationController) (*v1.ReplicationController, error)
-	Delete(name string, options *metav1.DeleteOptions) error
-	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
-	Get(name string, options metav1.GetOptions) (*v1.ReplicationController, error)
-	List(opts metav1.ListOptions) (*v1.ReplicationControllerList, error)
-	Watch(opts metav1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicationController, err error)
-	GetScale(replicationControllerName string, options metav1.GetOptions) (*autoscalingv1.Scale, error)
-	UpdateScale(replicationControllerName string, scale *autoscalingv1.Scale) (*autoscalingv1.Scale, error)
+	Create(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.CreateOptions) (*v1.ReplicationController, error)
+	Update(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (*v1.ReplicationController, error)
+	UpdateStatus(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (*v1.ReplicationController, error)
+	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ReplicationController, error)
+	List(ctx context.Context, opts metav1.ListOptions) (*v1.ReplicationControllerList, error)
+	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ReplicationController, err error)
+	GetScale(ctx context.Context, replicationControllerName string, options metav1.GetOptions) (*autoscalingv1.Scale, error)
+	UpdateScale(ctx context.Context, replicationControllerName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error)
 
 	ReplicationControllerExpansion
 }
@@ -68,20 +69,20 @@ func newReplicationControllers(c *CoreV1Client, namespace string) *replicationCo
 }
 
 // Get takes name of the replicationController, and returns the corresponding replicationController object, and an error if there is any.
-func (c *replicationControllers) Get(name string, options metav1.GetOptions) (result *v1.ReplicationController, err error) {
+func (c *replicationControllers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ReplicationController, err error) {
 	result = &v1.ReplicationController{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("replicationcontrollers").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of ReplicationControllers that match those selectors.
-func (c *replicationControllers) List(opts metav1.ListOptions) (result *v1.ReplicationControllerList, err error) {
+func (c *replicationControllers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ReplicationControllerList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -92,13 +93,13 @@ func (c *replicationControllers) List(opts metav1.ListOptions) (result *v1.Repli
 		Resource("replicationcontrollers").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested replicationControllers.
-func (c *replicationControllers) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+func (c *replicationControllers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -109,93 +110,96 @@ func (c *replicationControllers) Watch(opts metav1.ListOptions) (watch.Interface
 		Resource("replicationcontrollers").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a replicationController and creates it.  Returns the server's representation of the replicationController, and an error, if there is any.
-func (c *replicationControllers) Create(replicationController *v1.ReplicationController) (result *v1.ReplicationController, err error) {
+func (c *replicationControllers) Create(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.CreateOptions) (result *v1.ReplicationController, err error) {
 	result = &v1.ReplicationController{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("replicationcontrollers").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(replicationController).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a replicationController and updates it. Returns the server's representation of the replicationController, and an error, if there is any.
-func (c *replicationControllers) Update(replicationController *v1.ReplicationController) (result *v1.ReplicationController, err error) {
+func (c *replicationControllers) Update(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (result *v1.ReplicationController, err error) {
 	result = &v1.ReplicationController{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("replicationcontrollers").
 		Name(replicationController.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(replicationController).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // UpdateStatus was generated because the type contains a Status member.
 // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-
-func (c *replicationControllers) UpdateStatus(replicationController *v1.ReplicationController) (result *v1.ReplicationController, err error) {
+func (c *replicationControllers) UpdateStatus(ctx context.Context, replicationController *v1.ReplicationController, opts metav1.UpdateOptions) (result *v1.ReplicationController, err error) {
 	result = &v1.ReplicationController{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("replicationcontrollers").
 		Name(replicationController.Name).
 		SubResource("status").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(replicationController).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the replicationController and deletes it. Returns an error if one occurs.
-func (c *replicationControllers) Delete(name string, options *metav1.DeleteOptions) error {
+func (c *replicationControllers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("replicationcontrollers").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *replicationControllers) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+func (c *replicationControllers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("replicationcontrollers").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched replicationController.
-func (c *replicationControllers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicationController, err error) {
+func (c *replicationControllers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ReplicationController, err error) {
 	result = &v1.ReplicationController{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("replicationcontrollers").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // GetScale takes name of the replicationController, and returns the corresponding autoscalingv1.Scale object, and an error if there is any.
-func (c *replicationControllers) GetScale(replicationControllerName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) {
+func (c *replicationControllers) GetScale(ctx context.Context, replicationControllerName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) {
 	result = &autoscalingv1.Scale{}
 	err = c.client.Get().
 		Namespace(c.ns).
@@ -203,21 +207,22 @@ func (c *replicationControllers) GetScale(replicationControllerName string, opti
 		Name(replicationControllerName).
 		SubResource("scale").
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any.
-func (c *replicationControllers) UpdateScale(replicationControllerName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) {
+func (c *replicationControllers) UpdateScale(ctx context.Context, replicationControllerName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) {
 	result = &autoscalingv1.Scale{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("replicationcontrollers").
 		Name(replicationControllerName).
 		SubResource("scale").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(scale).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go
index 5a178990..6a41e35f 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1
 
 import (
+	"context"
 	"time"
 
 	v1 "k8s.io/api/core/v1"
@@ -37,15 +38,15 @@ type ResourceQuotasGetter interface {
 
 // ResourceQuotaInterface has methods to work with ResourceQuota resources.
 type ResourceQuotaInterface interface {
-	Create(*v1.ResourceQuota) (*v1.ResourceQuota, error)
-	Update(*v1.ResourceQuota) (*v1.ResourceQuota, error)
-	UpdateStatus(*v1.ResourceQuota) (*v1.ResourceQuota, error)
-	Delete(name string, options *metav1.DeleteOptions) error
-	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
-	Get(name string, options metav1.GetOptions) (*v1.ResourceQuota, error)
-	List(opts metav1.ListOptions) (*v1.ResourceQuotaList, error)
-	Watch(opts metav1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ResourceQuota, err error)
+	Create(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.CreateOptions) (*v1.ResourceQuota, error)
+	Update(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (*v1.ResourceQuota, error)
+	UpdateStatus(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (*v1.ResourceQuota, error)
+	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ResourceQuota, error)
+	List(ctx context.Context, opts metav1.ListOptions) (*v1.ResourceQuotaList, error)
+	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ResourceQuota, err error)
 	ResourceQuotaExpansion
 }
 
@@ -64,20 +65,20 @@ func newResourceQuotas(c *CoreV1Client, namespace string) *resourceQuotas {
 }
 
 // Get takes name of the resourceQuota, and returns the corresponding resourceQuota object, and an error if there is any.
-func (c *resourceQuotas) Get(name string, options metav1.GetOptions) (result *v1.ResourceQuota, err error) {
+func (c *resourceQuotas) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ResourceQuota, err error) {
 	result = &v1.ResourceQuota{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("resourcequotas").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of ResourceQuotas that match those selectors.
-func (c *resourceQuotas) List(opts metav1.ListOptions) (result *v1.ResourceQuotaList, err error) {
+func (c *resourceQuotas) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ResourceQuotaList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -88,13 +89,13 @@ func (c *resourceQuotas) List(opts metav1.ListOptions) (result *v1.ResourceQuota
 		Resource("resourcequotas").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested resourceQuotas.
-func (c *resourceQuotas) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+func (c *resourceQuotas) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -105,87 +106,90 @@ func (c *resourceQuotas) Watch(opts metav1.ListOptions) (watch.Interface, error)
 		Resource("resourcequotas").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a resourceQuota and creates it.  Returns the server's representation of the resourceQuota, and an error, if there is any.
-func (c *resourceQuotas) Create(resourceQuota *v1.ResourceQuota) (result *v1.ResourceQuota, err error) {
+func (c *resourceQuotas) Create(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.CreateOptions) (result *v1.ResourceQuota, err error) {
 	result = &v1.ResourceQuota{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("resourcequotas").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(resourceQuota).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a resourceQuota and updates it. Returns the server's representation of the resourceQuota, and an error, if there is any.
-func (c *resourceQuotas) Update(resourceQuota *v1.ResourceQuota) (result *v1.ResourceQuota, err error) {
+func (c *resourceQuotas) Update(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (result *v1.ResourceQuota, err error) {
 	result = &v1.ResourceQuota{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("resourcequotas").
 		Name(resourceQuota.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(resourceQuota).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // UpdateStatus was generated because the type contains a Status member.
 // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-
-func (c *resourceQuotas) UpdateStatus(resourceQuota *v1.ResourceQuota) (result *v1.ResourceQuota, err error) {
+func (c *resourceQuotas) UpdateStatus(ctx context.Context, resourceQuota *v1.ResourceQuota, opts metav1.UpdateOptions) (result *v1.ResourceQuota, err error) {
 	result = &v1.ResourceQuota{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("resourcequotas").
 		Name(resourceQuota.Name).
 		SubResource("status").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(resourceQuota).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the resourceQuota and deletes it. Returns an error if one occurs.
-func (c *resourceQuotas) Delete(name string, options *metav1.DeleteOptions) error {
+func (c *resourceQuotas) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("resourcequotas").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *resourceQuotas) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+func (c *resourceQuotas) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("resourcequotas").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched resourceQuota.
-func (c *resourceQuotas) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ResourceQuota, err error) {
+func (c *resourceQuotas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ResourceQuota, err error) {
 	result = &v1.ResourceQuota{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("resourcequotas").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go
index 85c143b1..b2bd80ba 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1
 
 import (
+	"context"
 	"time"
 
 	v1 "k8s.io/api/core/v1"
@@ -37,14 +38,14 @@ type SecretsGetter interface {
 
 // SecretInterface has methods to work with Secret resources.
 type SecretInterface interface {
-	Create(*v1.Secret) (*v1.Secret, error)
-	Update(*v1.Secret) (*v1.Secret, error)
-	Delete(name string, options *metav1.DeleteOptions) error
-	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
-	Get(name string, options metav1.GetOptions) (*v1.Secret, error)
-	List(opts metav1.ListOptions) (*v1.SecretList, error)
-	Watch(opts metav1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Secret, err error)
+	Create(ctx context.Context, secret *v1.Secret, opts metav1.CreateOptions) (*v1.Secret, error)
+	Update(ctx context.Context, secret *v1.Secret, opts metav1.UpdateOptions) (*v1.Secret, error)
+	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Secret, error)
+	List(ctx context.Context, opts metav1.ListOptions) (*v1.SecretList, error)
+	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Secret, err error)
 	SecretExpansion
 }
 
@@ -63,20 +64,20 @@ func newSecrets(c *CoreV1Client, namespace string) *secrets {
 }
 
 // Get takes name of the secret, and returns the corresponding secret object, and an error if there is any.
-func (c *secrets) Get(name string, options metav1.GetOptions) (result *v1.Secret, err error) {
+func (c *secrets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Secret, err error) {
 	result = &v1.Secret{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("secrets").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of Secrets that match those selectors.
-func (c *secrets) List(opts metav1.ListOptions) (result *v1.SecretList, err error) {
+func (c *secrets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.SecretList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -87,13 +88,13 @@ func (c *secrets) List(opts metav1.ListOptions) (result *v1.SecretList, err erro
 		Resource("secrets").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested secrets.
-func (c *secrets) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+func (c *secrets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -104,71 +105,74 @@ func (c *secrets) Watch(opts metav1.ListOptions) (watch.Interface, error) {
 		Resource("secrets").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a secret and creates it.  Returns the server's representation of the secret, and an error, if there is any.
-func (c *secrets) Create(secret *v1.Secret) (result *v1.Secret, err error) {
+func (c *secrets) Create(ctx context.Context, secret *v1.Secret, opts metav1.CreateOptions) (result *v1.Secret, err error) {
 	result = &v1.Secret{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("secrets").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(secret).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a secret and updates it. Returns the server's representation of the secret, and an error, if there is any.
-func (c *secrets) Update(secret *v1.Secret) (result *v1.Secret, err error) {
+func (c *secrets) Update(ctx context.Context, secret *v1.Secret, opts metav1.UpdateOptions) (result *v1.Secret, err error) {
 	result = &v1.Secret{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("secrets").
 		Name(secret.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(secret).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the secret and deletes it. Returns an error if one occurs.
-func (c *secrets) Delete(name string, options *metav1.DeleteOptions) error {
+func (c *secrets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("secrets").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *secrets) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+func (c *secrets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("secrets").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched secret.
-func (c *secrets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Secret, err error) {
+func (c *secrets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Secret, err error) {
 	result = &v1.Secret{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("secrets").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go
index b0e09413..ddde2ec6 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1
 
 import (
+	"context"
 	"time"
 
 	v1 "k8s.io/api/core/v1"
@@ -37,14 +38,14 @@ type ServicesGetter interface {
 
 // ServiceInterface has methods to work with Service resources.
 type ServiceInterface interface {
-	Create(*v1.Service) (*v1.Service, error)
-	Update(*v1.Service) (*v1.Service, error)
-	UpdateStatus(*v1.Service) (*v1.Service, error)
-	Delete(name string, options *metav1.DeleteOptions) error
-	Get(name string, options metav1.GetOptions) (*v1.Service, error)
-	List(opts metav1.ListOptions) (*v1.ServiceList, error)
-	Watch(opts metav1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Service, err error)
+	Create(ctx context.Context, service *v1.Service, opts metav1.CreateOptions) (*v1.Service, error)
+	Update(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (*v1.Service, error)
+	UpdateStatus(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (*v1.Service, error)
+	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Service, error)
+	List(ctx context.Context, opts metav1.ListOptions) (*v1.ServiceList, error)
+	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Service, err error)
 	ServiceExpansion
 }
 
@@ -63,20 +64,20 @@ func newServices(c *CoreV1Client, namespace string) *services {
 }
 
 // Get takes name of the service, and returns the corresponding service object, and an error if there is any.
-func (c *services) Get(name string, options metav1.GetOptions) (result *v1.Service, err error) {
+func (c *services) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Service, err error) {
 	result = &v1.Service{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("services").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of Services that match those selectors.
-func (c *services) List(opts metav1.ListOptions) (result *v1.ServiceList, err error) {
+func (c *services) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ServiceList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -87,13 +88,13 @@ func (c *services) List(opts metav1.ListOptions) (result *v1.ServiceList, err er
 		Resource("services").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested services.
-func (c *services) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+func (c *services) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -104,71 +105,74 @@ func (c *services) Watch(opts metav1.ListOptions) (watch.Interface, error) {
 		Resource("services").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a service and creates it.  Returns the server's representation of the service, and an error, if there is any.
-func (c *services) Create(service *v1.Service) (result *v1.Service, err error) {
+func (c *services) Create(ctx context.Context, service *v1.Service, opts metav1.CreateOptions) (result *v1.Service, err error) {
 	result = &v1.Service{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("services").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(service).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a service and updates it. Returns the server's representation of the service, and an error, if there is any.
-func (c *services) Update(service *v1.Service) (result *v1.Service, err error) {
+func (c *services) Update(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (result *v1.Service, err error) {
 	result = &v1.Service{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("services").
 		Name(service.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(service).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // UpdateStatus was generated because the type contains a Status member.
 // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-
-func (c *services) UpdateStatus(service *v1.Service) (result *v1.Service, err error) {
+func (c *services) UpdateStatus(ctx context.Context, service *v1.Service, opts metav1.UpdateOptions) (result *v1.Service, err error) {
 	result = &v1.Service{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("services").
 		Name(service.Name).
 		SubResource("status").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(service).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the service and deletes it. Returns an error if one occurs.
-func (c *services) Delete(name string, options *metav1.DeleteOptions) error {
+func (c *services) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("services").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched service.
-func (c *services) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Service, err error) {
+func (c *services) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Service, err error) {
 	result = &v1.Service{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("services").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go
index 50af6a21..c2ddfbfd 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go
@@ -19,8 +19,10 @@ limitations under the License.
 package v1
 
 import (
+	"context"
 	"time"
 
+	authenticationv1 "k8s.io/api/authentication/v1"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	types "k8s.io/apimachinery/pkg/types"
@@ -37,14 +39,16 @@ type ServiceAccountsGetter interface {
 
 // ServiceAccountInterface has methods to work with ServiceAccount resources.
 type ServiceAccountInterface interface {
-	Create(*v1.ServiceAccount) (*v1.ServiceAccount, error)
-	Update(*v1.ServiceAccount) (*v1.ServiceAccount, error)
-	Delete(name string, options *metav1.DeleteOptions) error
-	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
-	Get(name string, options metav1.GetOptions) (*v1.ServiceAccount, error)
-	List(opts metav1.ListOptions) (*v1.ServiceAccountList, error)
-	Watch(opts metav1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ServiceAccount, err error)
+	Create(ctx context.Context, serviceAccount *v1.ServiceAccount, opts metav1.CreateOptions) (*v1.ServiceAccount, error)
+	Update(ctx context.Context, serviceAccount *v1.ServiceAccount, opts metav1.UpdateOptions) (*v1.ServiceAccount, error)
+	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ServiceAccount, error)
+	List(ctx context.Context, opts metav1.ListOptions) (*v1.ServiceAccountList, error)
+	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ServiceAccount, err error)
+	CreateToken(ctx context.Context, serviceAccountName string, tokenRequest *authenticationv1.TokenRequest, opts metav1.CreateOptions) (*authenticationv1.TokenRequest, error)
+
 	ServiceAccountExpansion
 }
 
@@ -63,20 +67,20 @@ func newServiceAccounts(c *CoreV1Client, namespace string) *serviceAccounts {
 }
 
 // Get takes name of the serviceAccount, and returns the corresponding serviceAccount object, and an error if there is any.
-func (c *serviceAccounts) Get(name string, options metav1.GetOptions) (result *v1.ServiceAccount, err error) {
+func (c *serviceAccounts) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ServiceAccount, err error) {
 	result = &v1.ServiceAccount{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("serviceaccounts").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of ServiceAccounts that match those selectors.
-func (c *serviceAccounts) List(opts metav1.ListOptions) (result *v1.ServiceAccountList, err error) {
+func (c *serviceAccounts) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ServiceAccountList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -87,13 +91,13 @@ func (c *serviceAccounts) List(opts metav1.ListOptions) (result *v1.ServiceAccou
 		Resource("serviceaccounts").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested serviceAccounts.
-func (c *serviceAccounts) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+func (c *serviceAccounts) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -104,71 +108,89 @@ func (c *serviceAccounts) Watch(opts metav1.ListOptions) (watch.Interface, error
 		Resource("serviceaccounts").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a serviceAccount and creates it.  Returns the server's representation of the serviceAccount, and an error, if there is any.
-func (c *serviceAccounts) Create(serviceAccount *v1.ServiceAccount) (result *v1.ServiceAccount, err error) {
+func (c *serviceAccounts) Create(ctx context.Context, serviceAccount *v1.ServiceAccount, opts metav1.CreateOptions) (result *v1.ServiceAccount, err error) {
 	result = &v1.ServiceAccount{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("serviceaccounts").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(serviceAccount).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a serviceAccount and updates it. Returns the server's representation of the serviceAccount, and an error, if there is any.
-func (c *serviceAccounts) Update(serviceAccount *v1.ServiceAccount) (result *v1.ServiceAccount, err error) {
+func (c *serviceAccounts) Update(ctx context.Context, serviceAccount *v1.ServiceAccount, opts metav1.UpdateOptions) (result *v1.ServiceAccount, err error) {
 	result = &v1.ServiceAccount{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("serviceaccounts").
 		Name(serviceAccount.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(serviceAccount).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the serviceAccount and deletes it. Returns an error if one occurs.
-func (c *serviceAccounts) Delete(name string, options *metav1.DeleteOptions) error {
+func (c *serviceAccounts) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("serviceaccounts").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *serviceAccounts) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+func (c *serviceAccounts) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("serviceaccounts").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched serviceAccount.
-func (c *serviceAccounts) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ServiceAccount, err error) {
+func (c *serviceAccounts) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ServiceAccount, err error) {
 	result = &v1.ServiceAccount{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("serviceaccounts").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
+		Into(result)
+	return
+}
+
+// CreateToken takes the representation of a tokenRequest and creates it.  Returns the server's representation of the tokenRequest, and an error, if there is any.
+func (c *serviceAccounts) CreateToken(ctx context.Context, serviceAccountName string, tokenRequest *authenticationv1.TokenRequest, opts metav1.CreateOptions) (result *authenticationv1.TokenRequest, err error) {
+	result = &authenticationv1.TokenRequest{}
+	err = c.client.Post().
+		Namespace(c.ns).
+		Resource("serviceaccounts").
+		Name(serviceAccountName).
+		SubResource("token").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Body(tokenRequest).
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount_expansion.go
deleted file mode 100644
index eaf643f1..00000000
--- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount_expansion.go
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
-Copyright 2018 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1
-
-import (
-	authenticationv1 "k8s.io/api/authentication/v1"
-)
-
-// The ServiceAccountExpansion interface allows manually adding extra methods
-// to the ServiceAccountInterface.
-type ServiceAccountExpansion interface {
-	CreateToken(name string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error)
-}
-
-// CreateToken creates a new token for a serviceaccount.
-func (c *serviceAccounts) CreateToken(name string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
-	result := &authenticationv1.TokenRequest{}
-	err := c.client.Post().
-		Namespace(c.ns).
-		Resource("serviceaccounts").
-		SubResource("token").
-		Name(name).
-		Body(tr).
-		Do().
-		Into(result)
-	return result, err
-}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/endpointslice.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/endpointslice.go
index 41751476..63b4627d 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/endpointslice.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1alpha1/endpointslice.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1alpha1
 
 import (
+	"context"
 	"time"
 
 	v1alpha1 "k8s.io/api/discovery/v1alpha1"
@@ -37,14 +38,14 @@ type EndpointSlicesGetter interface {
 
 // EndpointSliceInterface has methods to work with EndpointSlice resources.
 type EndpointSliceInterface interface {
-	Create(*v1alpha1.EndpointSlice) (*v1alpha1.EndpointSlice, error)
-	Update(*v1alpha1.EndpointSlice) (*v1alpha1.EndpointSlice, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1alpha1.EndpointSlice, error)
-	List(opts v1.ListOptions) (*v1alpha1.EndpointSliceList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.EndpointSlice, err error)
+	Create(ctx context.Context, endpointSlice *v1alpha1.EndpointSlice, opts v1.CreateOptions) (*v1alpha1.EndpointSlice, error)
+	Update(ctx context.Context, endpointSlice *v1alpha1.EndpointSlice, opts v1.UpdateOptions) (*v1alpha1.EndpointSlice, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.EndpointSlice, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.EndpointSliceList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.EndpointSlice, err error)
 	EndpointSliceExpansion
 }
 
@@ -63,20 +64,20 @@ func newEndpointSlices(c *DiscoveryV1alpha1Client, namespace string) *endpointSl
 }
 
 // Get takes name of the endpointSlice, and returns the corresponding endpointSlice object, and an error if there is any.
-func (c *endpointSlices) Get(name string, options v1.GetOptions) (result *v1alpha1.EndpointSlice, err error) {
+func (c *endpointSlices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.EndpointSlice, err error) {
 	result = &v1alpha1.EndpointSlice{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("endpointslices").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of EndpointSlices that match those selectors.
-func (c *endpointSlices) List(opts v1.ListOptions) (result *v1alpha1.EndpointSliceList, err error) {
+func (c *endpointSlices) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.EndpointSliceList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -87,13 +88,13 @@ func (c *endpointSlices) List(opts v1.ListOptions) (result *v1alpha1.EndpointSli
 		Resource("endpointslices").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested endpointSlices.
-func (c *endpointSlices) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *endpointSlices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -104,71 +105,74 @@ func (c *endpointSlices) Watch(opts v1.ListOptions) (watch.Interface, error) {
 		Resource("endpointslices").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a endpointSlice and creates it.  Returns the server's representation of the endpointSlice, and an error, if there is any.
-func (c *endpointSlices) Create(endpointSlice *v1alpha1.EndpointSlice) (result *v1alpha1.EndpointSlice, err error) {
+func (c *endpointSlices) Create(ctx context.Context, endpointSlice *v1alpha1.EndpointSlice, opts v1.CreateOptions) (result *v1alpha1.EndpointSlice, err error) {
 	result = &v1alpha1.EndpointSlice{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("endpointslices").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(endpointSlice).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any.
-func (c *endpointSlices) Update(endpointSlice *v1alpha1.EndpointSlice) (result *v1alpha1.EndpointSlice, err error) {
+func (c *endpointSlices) Update(ctx context.Context, endpointSlice *v1alpha1.EndpointSlice, opts v1.UpdateOptions) (result *v1alpha1.EndpointSlice, err error) {
 	result = &v1alpha1.EndpointSlice{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("endpointslices").
 		Name(endpointSlice.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(endpointSlice).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the endpointSlice and deletes it. Returns an error if one occurs.
-func (c *endpointSlices) Delete(name string, options *v1.DeleteOptions) error {
+func (c *endpointSlices) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("endpointslices").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *endpointSlices) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *endpointSlices) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("endpointslices").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched endpointSlice.
-func (c *endpointSlices) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.EndpointSlice, err error) {
+func (c *endpointSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.EndpointSlice, err error) {
 	result = &v1alpha1.EndpointSlice{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("endpointslices").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go
index bba656b8..a016663e 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/discovery/v1beta1/endpointslice.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1beta1
 
 import (
+	"context"
 	"time"
 
 	v1beta1 "k8s.io/api/discovery/v1beta1"
@@ -37,14 +38,14 @@ type EndpointSlicesGetter interface {
 
 // EndpointSliceInterface has methods to work with EndpointSlice resources.
 type EndpointSliceInterface interface {
-	Create(*v1beta1.EndpointSlice) (*v1beta1.EndpointSlice, error)
-	Update(*v1beta1.EndpointSlice) (*v1beta1.EndpointSlice, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1beta1.EndpointSlice, error)
-	List(opts v1.ListOptions) (*v1beta1.EndpointSliceList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.EndpointSlice, err error)
+	Create(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.CreateOptions) (*v1beta1.EndpointSlice, error)
+	Update(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.UpdateOptions) (*v1beta1.EndpointSlice, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.EndpointSlice, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1beta1.EndpointSliceList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.EndpointSlice, err error)
 	EndpointSliceExpansion
 }
 
@@ -63,20 +64,20 @@ func newEndpointSlices(c *DiscoveryV1beta1Client, namespace string) *endpointSli
 }
 
 // Get takes name of the endpointSlice, and returns the corresponding endpointSlice object, and an error if there is any.
-func (c *endpointSlices) Get(name string, options v1.GetOptions) (result *v1beta1.EndpointSlice, err error) {
+func (c *endpointSlices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.EndpointSlice, err error) {
 	result = &v1beta1.EndpointSlice{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("endpointslices").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of EndpointSlices that match those selectors.
-func (c *endpointSlices) List(opts v1.ListOptions) (result *v1beta1.EndpointSliceList, err error) {
+func (c *endpointSlices) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.EndpointSliceList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -87,13 +88,13 @@ func (c *endpointSlices) List(opts v1.ListOptions) (result *v1beta1.EndpointSlic
 		Resource("endpointslices").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested endpointSlices.
-func (c *endpointSlices) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *endpointSlices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -104,71 +105,74 @@ func (c *endpointSlices) Watch(opts v1.ListOptions) (watch.Interface, error) {
 		Resource("endpointslices").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a endpointSlice and creates it.  Returns the server's representation of the endpointSlice, and an error, if there is any.
-func (c *endpointSlices) Create(endpointSlice *v1beta1.EndpointSlice) (result *v1beta1.EndpointSlice, err error) {
+func (c *endpointSlices) Create(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.CreateOptions) (result *v1beta1.EndpointSlice, err error) {
 	result = &v1beta1.EndpointSlice{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("endpointslices").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(endpointSlice).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a endpointSlice and updates it. Returns the server's representation of the endpointSlice, and an error, if there is any.
-func (c *endpointSlices) Update(endpointSlice *v1beta1.EndpointSlice) (result *v1beta1.EndpointSlice, err error) {
+func (c *endpointSlices) Update(ctx context.Context, endpointSlice *v1beta1.EndpointSlice, opts v1.UpdateOptions) (result *v1beta1.EndpointSlice, err error) {
 	result = &v1beta1.EndpointSlice{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("endpointslices").
 		Name(endpointSlice.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(endpointSlice).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the endpointSlice and deletes it. Returns an error if one occurs.
-func (c *endpointSlices) Delete(name string, options *v1.DeleteOptions) error {
+func (c *endpointSlices) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("endpointslices").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *endpointSlices) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *endpointSlices) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("endpointslices").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched endpointSlice.
-func (c *endpointSlices) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.EndpointSlice, err error) {
+func (c *endpointSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.EndpointSlice, err error) {
 	result = &v1beta1.EndpointSlice{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("endpointslices").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go
index 143281b2..4cdc471f 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1beta1
 
 import (
+	"context"
 	"time"
 
 	v1beta1 "k8s.io/api/events/v1beta1"
@@ -37,14 +38,14 @@ type EventsGetter interface {
 
 // EventInterface has methods to work with Event resources.
 type EventInterface interface {
-	Create(*v1beta1.Event) (*v1beta1.Event, error)
-	Update(*v1beta1.Event) (*v1beta1.Event, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1beta1.Event, error)
-	List(opts v1.ListOptions) (*v1beta1.EventList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Event, err error)
+	Create(ctx context.Context, event *v1beta1.Event, opts v1.CreateOptions) (*v1beta1.Event, error)
+	Update(ctx context.Context, event *v1beta1.Event, opts v1.UpdateOptions) (*v1beta1.Event, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Event, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1beta1.EventList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Event, err error)
 	EventExpansion
 }
 
@@ -63,20 +64,20 @@ func newEvents(c *EventsV1beta1Client, namespace string) *events {
 }
 
 // Get takes name of the event, and returns the corresponding event object, and an error if there is any.
-func (c *events) Get(name string, options v1.GetOptions) (result *v1beta1.Event, err error) {
+func (c *events) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Event, err error) {
 	result = &v1beta1.Event{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("events").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of Events that match those selectors.
-func (c *events) List(opts v1.ListOptions) (result *v1beta1.EventList, err error) {
+func (c *events) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.EventList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -87,13 +88,13 @@ func (c *events) List(opts v1.ListOptions) (result *v1beta1.EventList, err error
 		Resource("events").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested events.
-func (c *events) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *events) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -104,71 +105,74 @@ func (c *events) Watch(opts v1.ListOptions) (watch.Interface, error) {
 		Resource("events").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a event and creates it.  Returns the server's representation of the event, and an error, if there is any.
-func (c *events) Create(event *v1beta1.Event) (result *v1beta1.Event, err error) {
+func (c *events) Create(ctx context.Context, event *v1beta1.Event, opts v1.CreateOptions) (result *v1beta1.Event, err error) {
 	result = &v1beta1.Event{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("events").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(event).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any.
-func (c *events) Update(event *v1beta1.Event) (result *v1beta1.Event, err error) {
+func (c *events) Update(ctx context.Context, event *v1beta1.Event, opts v1.UpdateOptions) (result *v1beta1.Event, err error) {
 	result = &v1beta1.Event{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("events").
 		Name(event.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(event).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the event and deletes it. Returns an error if one occurs.
-func (c *events) Delete(name string, options *v1.DeleteOptions) error {
+func (c *events) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("events").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *events) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *events) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("events").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched event.
-func (c *events) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Event, err error) {
+func (c *events) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Event, err error) {
 	result = &v1beta1.Event{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("events").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go
index 312ee428..e0ae41df 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event_expansion.go
@@ -17,6 +17,7 @@ limitations under the License.
 package v1beta1
 
 import (
+	"context"
 	"fmt"
 
 	"k8s.io/api/events/v1beta1"
@@ -51,7 +52,7 @@ func (e *events) CreateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event,
 		NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0).
 		Resource("events").
 		Body(event).
-		Do().
+		Do(context.TODO()).
 		Into(result)
 	return result, err
 }
@@ -72,7 +73,7 @@ func (e *events) UpdateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event,
 		Resource("events").
 		Name(event.Name).
 		Body(event).
-		Do().
+		Do(context.TODO()).
 		Into(result)
 	return result, err
 }
@@ -92,7 +93,7 @@ func (e *events) PatchWithEventNamespace(event *v1beta1.Event, data []byte) (*v1
 		Resource("events").
 		Name(event.Name).
 		Body(data).
-		Do().
+		Do(context.TODO()).
 		Into(result)
 	return result, err
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go
index 93b1ae9b..0ba8bfc9 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1beta1
 
 import (
+	"context"
 	"time"
 
 	v1beta1 "k8s.io/api/extensions/v1beta1"
@@ -37,15 +38,15 @@ type DaemonSetsGetter interface {
 
 // DaemonSetInterface has methods to work with DaemonSet resources.
 type DaemonSetInterface interface {
-	Create(*v1beta1.DaemonSet) (*v1beta1.DaemonSet, error)
-	Update(*v1beta1.DaemonSet) (*v1beta1.DaemonSet, error)
-	UpdateStatus(*v1beta1.DaemonSet) (*v1beta1.DaemonSet, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1beta1.DaemonSet, error)
-	List(opts v1.ListOptions) (*v1beta1.DaemonSetList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.DaemonSet, err error)
+	Create(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.CreateOptions) (*v1beta1.DaemonSet, error)
+	Update(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (*v1beta1.DaemonSet, error)
+	UpdateStatus(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (*v1beta1.DaemonSet, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.DaemonSet, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1beta1.DaemonSetList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.DaemonSet, err error)
 	DaemonSetExpansion
 }
 
@@ -64,20 +65,20 @@ func newDaemonSets(c *ExtensionsV1beta1Client, namespace string) *daemonSets {
 }
 
 // Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any.
-func (c *daemonSets) Get(name string, options v1.GetOptions) (result *v1beta1.DaemonSet, err error) {
+func (c *daemonSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.DaemonSet, err error) {
 	result = &v1beta1.DaemonSet{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("daemonsets").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of DaemonSets that match those selectors.
-func (c *daemonSets) List(opts v1.ListOptions) (result *v1beta1.DaemonSetList, err error) {
+func (c *daemonSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.DaemonSetList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -88,13 +89,13 @@ func (c *daemonSets) List(opts v1.ListOptions) (result *v1beta1.DaemonSetList, e
 		Resource("daemonsets").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested daemonSets.
-func (c *daemonSets) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *daemonSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -105,87 +106,90 @@ func (c *daemonSets) Watch(opts v1.ListOptions) (watch.Interface, error) {
 		Resource("daemonsets").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a daemonSet and creates it.  Returns the server's representation of the daemonSet, and an error, if there is any.
-func (c *daemonSets) Create(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) {
+func (c *daemonSets) Create(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.CreateOptions) (result *v1beta1.DaemonSet, err error) {
 	result = &v1beta1.DaemonSet{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("daemonsets").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(daemonSet).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any.
-func (c *daemonSets) Update(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) {
+func (c *daemonSets) Update(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (result *v1beta1.DaemonSet, err error) {
 	result = &v1beta1.DaemonSet{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("daemonsets").
 		Name(daemonSet.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(daemonSet).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // UpdateStatus was generated because the type contains a Status member.
 // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-
-func (c *daemonSets) UpdateStatus(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) {
+func (c *daemonSets) UpdateStatus(ctx context.Context, daemonSet *v1beta1.DaemonSet, opts v1.UpdateOptions) (result *v1beta1.DaemonSet, err error) {
 	result = &v1beta1.DaemonSet{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("daemonsets").
 		Name(daemonSet.Name).
 		SubResource("status").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(daemonSet).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the daemonSet and deletes it. Returns an error if one occurs.
-func (c *daemonSets) Delete(name string, options *v1.DeleteOptions) error {
+func (c *daemonSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("daemonsets").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *daemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *daemonSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("daemonsets").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched daemonSet.
-func (c *daemonSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.DaemonSet, err error) {
+func (c *daemonSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.DaemonSet, err error) {
 	result = &v1beta1.DaemonSet{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("daemonsets").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
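// Caller-side sketch for the context-aware DaemonSet signatures above: every
// request now takes a context.Context plus an explicit options struct. Assumes
// a configured *kubernetes.Clientset; namespace and object names are hypothetical.
//
//	import (
//		"context"
//		"time"
//
//		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
//		"k8s.io/client-go/kubernetes"
//	)
//
//	func listDaemonSets(clientset *kubernetes.Clientset) error {
//		// Bound the calls with a deadline; the context is passed to Do()/Watch().
//		ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//		defer cancel()
//		if _, err := clientset.ExtensionsV1beta1().DaemonSets("kube-system").
//			Get(ctx, "kube-proxy", metav1.GetOptions{}); err != nil {
//			return err
//		}
//		_, err := clientset.ExtensionsV1beta1().DaemonSets("kube-system").
//			List(ctx, metav1.ListOptions{LabelSelector: "k8s-app=kube-proxy"})
//		return err
//	}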
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go
index 5557b9f2..4265f6de 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1beta1
 
 import (
+	"context"
 	"time"
 
 	v1beta1 "k8s.io/api/extensions/v1beta1"
@@ -37,17 +38,17 @@ type DeploymentsGetter interface {
 
 // DeploymentInterface has methods to work with Deployment resources.
 type DeploymentInterface interface {
-	Create(*v1beta1.Deployment) (*v1beta1.Deployment, error)
-	Update(*v1beta1.Deployment) (*v1beta1.Deployment, error)
-	UpdateStatus(*v1beta1.Deployment) (*v1beta1.Deployment, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1beta1.Deployment, error)
-	List(opts v1.ListOptions) (*v1beta1.DeploymentList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error)
-	GetScale(deploymentName string, options v1.GetOptions) (*v1beta1.Scale, error)
-	UpdateScale(deploymentName string, scale *v1beta1.Scale) (*v1beta1.Scale, error)
+	Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (*v1beta1.Deployment, error)
+	Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error)
+	UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (*v1beta1.Deployment, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Deployment, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1beta1.DeploymentList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error)
+	GetScale(ctx context.Context, deploymentName string, options v1.GetOptions) (*v1beta1.Scale, error)
+	UpdateScale(ctx context.Context, deploymentName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (*v1beta1.Scale, error)
 
 	DeploymentExpansion
 }
@@ -67,20 +68,20 @@ func newDeployments(c *ExtensionsV1beta1Client, namespace string) *deployments {
 }
 
 // Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any.
-func (c *deployments) Get(name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) {
+func (c *deployments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) {
 	result = &v1beta1.Deployment{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("deployments").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of Deployments that match those selectors.
-func (c *deployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) {
+func (c *deployments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -91,13 +92,13 @@ func (c *deployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList,
 		Resource("deployments").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested deployments.
-func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *deployments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -108,93 +109,96 @@ func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) {
 		Resource("deployments").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a deployment and creates it.  Returns the server's representation of the deployment, and an error, if there is any.
-func (c *deployments) Create(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) {
+func (c *deployments) Create(ctx context.Context, deployment *v1beta1.Deployment, opts v1.CreateOptions) (result *v1beta1.Deployment, err error) {
 	result = &v1beta1.Deployment{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("deployments").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(deployment).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any.
-func (c *deployments) Update(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) {
+func (c *deployments) Update(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) {
 	result = &v1beta1.Deployment{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("deployments").
 		Name(deployment.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(deployment).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // UpdateStatus was generated because the type contains a Status member.
 // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-
-func (c *deployments) UpdateStatus(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) {
+func (c *deployments) UpdateStatus(ctx context.Context, deployment *v1beta1.Deployment, opts v1.UpdateOptions) (result *v1beta1.Deployment, err error) {
 	result = &v1beta1.Deployment{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("deployments").
 		Name(deployment.Name).
 		SubResource("status").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(deployment).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the deployment and deletes it. Returns an error if one occurs.
-func (c *deployments) Delete(name string, options *v1.DeleteOptions) error {
+func (c *deployments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("deployments").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *deployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *deployments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("deployments").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched deployment.
-func (c *deployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) {
+func (c *deployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Deployment, err error) {
 	result = &v1beta1.Deployment{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("deployments").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // GetScale takes name of the deployment, and returns the corresponding v1beta1.Scale object, and an error if there is any.
-func (c *deployments) GetScale(deploymentName string, options v1.GetOptions) (result *v1beta1.Scale, err error) {
+func (c *deployments) GetScale(ctx context.Context, deploymentName string, options v1.GetOptions) (result *v1beta1.Scale, err error) {
 	result = &v1beta1.Scale{}
 	err = c.client.Get().
 		Namespace(c.ns).
@@ -202,21 +206,22 @@ func (c *deployments) GetScale(deploymentName string, options v1.GetOptions) (re
 		Name(deploymentName).
 		SubResource("scale").
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any.
-func (c *deployments) UpdateScale(deploymentName string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) {
+func (c *deployments) UpdateScale(ctx context.Context, deploymentName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (result *v1beta1.Scale, err error) {
 	result = &v1beta1.Scale{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("deployments").
 		Name(deploymentName).
 		SubResource("scale").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(scale).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go
index 24734be6..5c409ac9 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go
@@ -16,14 +16,20 @@ limitations under the License.
 
 package v1beta1
 
-import "k8s.io/api/extensions/v1beta1"
+import (
+	"context"
+
+	"k8s.io/api/extensions/v1beta1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+)
 
 // The DeploymentExpansion interface allows manually adding extra methods to the DeploymentInterface.
 type DeploymentExpansion interface {
-	Rollback(*v1beta1.DeploymentRollback) error
+	Rollback(context.Context, *v1beta1.DeploymentRollback, metav1.CreateOptions) error
 }
 
 // Rollback applied the provided DeploymentRollback to the named deployment in the current namespace.
-func (c *deployments) Rollback(deploymentRollback *v1beta1.DeploymentRollback) error {
-	return c.client.Post().Namespace(c.ns).Resource("deployments").Name(deploymentRollback.Name).SubResource("rollback").Body(deploymentRollback).Do().Error()
+func (c *deployments) Rollback(ctx context.Context, deploymentRollback *v1beta1.DeploymentRollback, opts metav1.CreateOptions) error {
+	return c.client.Post().Namespace(c.ns).Resource("deployments").Name(deploymentRollback.Name).VersionedParams(&opts, scheme.ParameterCodec).SubResource("rollback").Body(deploymentRollback).Do(ctx).Error()
 }
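// Caller-side sketch for the expanded Rollback signature: the rollback call now
// threads a context and metav1.CreateOptions. Assumes a configured
// *kubernetes.Clientset; the deployment name and revision are hypothetical.
//
//	import (
//		"context"
//
//		extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
//		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
//		"k8s.io/client-go/kubernetes"
//	)
//
//	func rollbackDeployment(clientset *kubernetes.Clientset) error {
//		rollback := &extensionsv1beta1.DeploymentRollback{
//			Name:       "web",
//			RollbackTo: extensionsv1beta1.RollbackConfig{Revision: 0}, // 0 means last revision
//		}
//		return clientset.ExtensionsV1beta1().Deployments("default").
//			Rollback(context.TODO(), rollback, metav1.CreateOptions{})
//	}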
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go
index 4da51c36..b19e2455 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1beta1
 
 import (
+	"context"
 	"time"
 
 	v1beta1 "k8s.io/api/extensions/v1beta1"
@@ -37,15 +38,15 @@ type IngressesGetter interface {
 
 // IngressInterface has methods to work with Ingress resources.
 type IngressInterface interface {
-	Create(*v1beta1.Ingress) (*v1beta1.Ingress, error)
-	Update(*v1beta1.Ingress) (*v1beta1.Ingress, error)
-	UpdateStatus(*v1beta1.Ingress) (*v1beta1.Ingress, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1beta1.Ingress, error)
-	List(opts v1.ListOptions) (*v1beta1.IngressList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Ingress, err error)
+	Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (*v1beta1.Ingress, error)
+	Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error)
+	UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Ingress, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1beta1.IngressList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error)
 	IngressExpansion
 }
 
@@ -64,20 +65,20 @@ func newIngresses(c *ExtensionsV1beta1Client, namespace string) *ingresses {
 }
 
 // Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any.
-func (c *ingresses) Get(name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) {
+func (c *ingresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) {
 	result = &v1beta1.Ingress{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("ingresses").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of Ingresses that match those selectors.
-func (c *ingresses) List(opts v1.ListOptions) (result *v1beta1.IngressList, err error) {
+func (c *ingresses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.IngressList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -88,13 +89,13 @@ func (c *ingresses) List(opts v1.ListOptions) (result *v1beta1.IngressList, err
 		Resource("ingresses").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested ingresses.
-func (c *ingresses) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *ingresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -105,87 +106,90 @@ func (c *ingresses) Watch(opts v1.ListOptions) (watch.Interface, error) {
 		Resource("ingresses").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a ingress and creates it.  Returns the server's representation of the ingress, and an error, if there is any.
-func (c *ingresses) Create(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) {
+func (c *ingresses) Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (result *v1beta1.Ingress, err error) {
 	result = &v1beta1.Ingress{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("ingresses").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(ingress).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any.
-func (c *ingresses) Update(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) {
+func (c *ingresses) Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) {
 	result = &v1beta1.Ingress{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("ingresses").
 		Name(ingress.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(ingress).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // UpdateStatus was generated because the type contains a Status member.
 // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-
-func (c *ingresses) UpdateStatus(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) {
+func (c *ingresses) UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) {
 	result = &v1beta1.Ingress{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("ingresses").
 		Name(ingress.Name).
 		SubResource("status").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(ingress).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the ingress and deletes it. Returns an error if one occurs.
-func (c *ingresses) Delete(name string, options *v1.DeleteOptions) error {
+func (c *ingresses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("ingresses").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *ingresses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *ingresses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("ingresses").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched ingress.
-func (c *ingresses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Ingress, err error) {
+func (c *ingresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error) {
 	result = &v1beta1.Ingress{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("ingresses").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
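// Caller-side sketch for the new Patch signature, which takes a context and
// metav1.PatchOptions before the variadic subresources. Assumes a configured
// *kubernetes.Clientset; the ingress name and label value are hypothetical.
//
//	import (
//		"context"
//
//		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
//		"k8s.io/apimachinery/pkg/types"
//		"k8s.io/client-go/kubernetes"
//	)
//
//	func labelIngress(clientset *kubernetes.Clientset) error {
//		patch := []byte(`{"metadata":{"labels":{"team":"infra"}}}`)
//		_, err := clientset.ExtensionsV1beta1().Ingresses("default").
//			Patch(context.TODO(), "web", types.StrategicMergePatchType, patch, metav1.PatchOptions{})
//		return err
//	}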
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go
index 0607e2dd..ed9ae30d 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/networkpolicy.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1beta1
 
 import (
+	"context"
 	"time"
 
 	v1beta1 "k8s.io/api/extensions/v1beta1"
@@ -37,14 +38,14 @@ type NetworkPoliciesGetter interface {
 
 // NetworkPolicyInterface has methods to work with NetworkPolicy resources.
 type NetworkPolicyInterface interface {
-	Create(*v1beta1.NetworkPolicy) (*v1beta1.NetworkPolicy, error)
-	Update(*v1beta1.NetworkPolicy) (*v1beta1.NetworkPolicy, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1beta1.NetworkPolicy, error)
-	List(opts v1.ListOptions) (*v1beta1.NetworkPolicyList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.NetworkPolicy, err error)
+	Create(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.CreateOptions) (*v1beta1.NetworkPolicy, error)
+	Update(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.UpdateOptions) (*v1beta1.NetworkPolicy, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.NetworkPolicy, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1beta1.NetworkPolicyList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.NetworkPolicy, err error)
 	NetworkPolicyExpansion
 }
 
@@ -63,20 +64,20 @@ func newNetworkPolicies(c *ExtensionsV1beta1Client, namespace string) *networkPo
 }
 
 // Get takes name of the networkPolicy, and returns the corresponding networkPolicy object, and an error if there is any.
-func (c *networkPolicies) Get(name string, options v1.GetOptions) (result *v1beta1.NetworkPolicy, err error) {
+func (c *networkPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.NetworkPolicy, err error) {
 	result = &v1beta1.NetworkPolicy{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("networkpolicies").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors.
-func (c *networkPolicies) List(opts v1.ListOptions) (result *v1beta1.NetworkPolicyList, err error) {
+func (c *networkPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.NetworkPolicyList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -87,13 +88,13 @@ func (c *networkPolicies) List(opts v1.ListOptions) (result *v1beta1.NetworkPoli
 		Resource("networkpolicies").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested networkPolicies.
-func (c *networkPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *networkPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -104,71 +105,74 @@ func (c *networkPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) {
 		Resource("networkpolicies").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a networkPolicy and creates it.  Returns the server's representation of the networkPolicy, and an error, if there is any.
-func (c *networkPolicies) Create(networkPolicy *v1beta1.NetworkPolicy) (result *v1beta1.NetworkPolicy, err error) {
+func (c *networkPolicies) Create(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.CreateOptions) (result *v1beta1.NetworkPolicy, err error) {
 	result = &v1beta1.NetworkPolicy{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("networkpolicies").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(networkPolicy).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a networkPolicy and updates it. Returns the server's representation of the networkPolicy, and an error, if there is any.
-func (c *networkPolicies) Update(networkPolicy *v1beta1.NetworkPolicy) (result *v1beta1.NetworkPolicy, err error) {
+func (c *networkPolicies) Update(ctx context.Context, networkPolicy *v1beta1.NetworkPolicy, opts v1.UpdateOptions) (result *v1beta1.NetworkPolicy, err error) {
 	result = &v1beta1.NetworkPolicy{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("networkpolicies").
 		Name(networkPolicy.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(networkPolicy).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the networkPolicy and deletes it. Returns an error if one occurs.
-func (c *networkPolicies) Delete(name string, options *v1.DeleteOptions) error {
+func (c *networkPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("networkpolicies").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *networkPolicies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *networkPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("networkpolicies").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched networkPolicy.
-func (c *networkPolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.NetworkPolicy, err error) {
+func (c *networkPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.NetworkPolicy, err error) {
 	result = &v1beta1.NetworkPolicy{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("networkpolicies").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go
index a947a54a..76e67ded 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1beta1
 
 import (
+	"context"
 	"time"
 
 	v1beta1 "k8s.io/api/extensions/v1beta1"
@@ -37,14 +38,14 @@ type PodSecurityPoliciesGetter interface {
 
 // PodSecurityPolicyInterface has methods to work with PodSecurityPolicy resources.
 type PodSecurityPolicyInterface interface {
-	Create(*v1beta1.PodSecurityPolicy) (*v1beta1.PodSecurityPolicy, error)
-	Update(*v1beta1.PodSecurityPolicy) (*v1beta1.PodSecurityPolicy, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1beta1.PodSecurityPolicy, error)
-	List(opts v1.ListOptions) (*v1beta1.PodSecurityPolicyList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error)
+	Create(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.CreateOptions) (*v1beta1.PodSecurityPolicy, error)
+	Update(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.UpdateOptions) (*v1beta1.PodSecurityPolicy, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.PodSecurityPolicy, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1beta1.PodSecurityPolicyList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error)
 	PodSecurityPolicyExpansion
 }
 
@@ -61,19 +62,19 @@ func newPodSecurityPolicies(c *ExtensionsV1beta1Client) *podSecurityPolicies {
 }
 
 // Get takes name of the podSecurityPolicy, and returns the corresponding podSecurityPolicy object, and an error if there is any.
-func (c *podSecurityPolicies) Get(name string, options v1.GetOptions) (result *v1beta1.PodSecurityPolicy, err error) {
+func (c *podSecurityPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PodSecurityPolicy, err error) {
 	result = &v1beta1.PodSecurityPolicy{}
 	err = c.client.Get().
 		Resource("podsecuritypolicies").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors.
-func (c *podSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) {
+func (c *podSecurityPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -83,13 +84,13 @@ func (c *podSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.PodSecu
 		Resource("podsecuritypolicies").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested podSecurityPolicies.
-func (c *podSecurityPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *podSecurityPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -99,66 +100,69 @@ func (c *podSecurityPolicies) Watch(opts v1.ListOptions) (watch.Interface, error
 		Resource("podsecuritypolicies").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a podSecurityPolicy and creates it.  Returns the server's representation of the podSecurityPolicy, and an error, if there is any.
-func (c *podSecurityPolicies) Create(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) {
+func (c *podSecurityPolicies) Create(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.CreateOptions) (result *v1beta1.PodSecurityPolicy, err error) {
 	result = &v1beta1.PodSecurityPolicy{}
 	err = c.client.Post().
 		Resource("podsecuritypolicies").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(podSecurityPolicy).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a podSecurityPolicy and updates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any.
-func (c *podSecurityPolicies) Update(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) {
+func (c *podSecurityPolicies) Update(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.UpdateOptions) (result *v1beta1.PodSecurityPolicy, err error) {
 	result = &v1beta1.PodSecurityPolicy{}
 	err = c.client.Put().
 		Resource("podsecuritypolicies").
 		Name(podSecurityPolicy.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(podSecurityPolicy).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the podSecurityPolicy and deletes it. Returns an error if one occurs.
-func (c *podSecurityPolicies) Delete(name string, options *v1.DeleteOptions) error {
+func (c *podSecurityPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Resource("podsecuritypolicies").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *podSecurityPolicies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *podSecurityPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Resource("podsecuritypolicies").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched podSecurityPolicy.
-func (c *podSecurityPolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) {
+func (c *podSecurityPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) {
 	result = &v1beta1.PodSecurityPolicy{}
 	err = c.client.Patch(pt).
 		Resource("podsecuritypolicies").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go
index 44402905..64e3c186 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1beta1
 
 import (
+	"context"
 	"time"
 
 	v1beta1 "k8s.io/api/extensions/v1beta1"
@@ -37,17 +38,17 @@ type ReplicaSetsGetter interface {
 
 // ReplicaSetInterface has methods to work with ReplicaSet resources.
 type ReplicaSetInterface interface {
-	Create(*v1beta1.ReplicaSet) (*v1beta1.ReplicaSet, error)
-	Update(*v1beta1.ReplicaSet) (*v1beta1.ReplicaSet, error)
-	UpdateStatus(*v1beta1.ReplicaSet) (*v1beta1.ReplicaSet, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1beta1.ReplicaSet, error)
-	List(opts v1.ListOptions) (*v1beta1.ReplicaSetList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ReplicaSet, err error)
-	GetScale(replicaSetName string, options v1.GetOptions) (*v1beta1.Scale, error)
-	UpdateScale(replicaSetName string, scale *v1beta1.Scale) (*v1beta1.Scale, error)
+	Create(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.CreateOptions) (*v1beta1.ReplicaSet, error)
+	Update(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (*v1beta1.ReplicaSet, error)
+	UpdateStatus(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (*v1beta1.ReplicaSet, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ReplicaSet, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ReplicaSetList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ReplicaSet, err error)
+	GetScale(ctx context.Context, replicaSetName string, options v1.GetOptions) (*v1beta1.Scale, error)
+	UpdateScale(ctx context.Context, replicaSetName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (*v1beta1.Scale, error)
 
 	ReplicaSetExpansion
 }
@@ -67,20 +68,20 @@ func newReplicaSets(c *ExtensionsV1beta1Client, namespace string) *replicaSets {
 }
 
 // Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any.
-func (c *replicaSets) Get(name string, options v1.GetOptions) (result *v1beta1.ReplicaSet, err error) {
+func (c *replicaSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ReplicaSet, err error) {
 	result = &v1beta1.ReplicaSet{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("replicasets").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of ReplicaSets that match those selectors.
-func (c *replicaSets) List(opts v1.ListOptions) (result *v1beta1.ReplicaSetList, err error) {
+func (c *replicaSets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ReplicaSetList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -91,13 +92,13 @@ func (c *replicaSets) List(opts v1.ListOptions) (result *v1beta1.ReplicaSetList,
 		Resource("replicasets").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested replicaSets.
-func (c *replicaSets) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *replicaSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -108,93 +109,96 @@ func (c *replicaSets) Watch(opts v1.ListOptions) (watch.Interface, error) {
 		Resource("replicasets").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a replicaSet and creates it.  Returns the server's representation of the replicaSet, and an error, if there is any.
-func (c *replicaSets) Create(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) {
+func (c *replicaSets) Create(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.CreateOptions) (result *v1beta1.ReplicaSet, err error) {
 	result = &v1beta1.ReplicaSet{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("replicasets").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(replicaSet).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any.
-func (c *replicaSets) Update(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) {
+func (c *replicaSets) Update(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (result *v1beta1.ReplicaSet, err error) {
 	result = &v1beta1.ReplicaSet{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("replicasets").
 		Name(replicaSet.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(replicaSet).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // UpdateStatus was generated because the type contains a Status member.
 // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-
-func (c *replicaSets) UpdateStatus(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) {
+func (c *replicaSets) UpdateStatus(ctx context.Context, replicaSet *v1beta1.ReplicaSet, opts v1.UpdateOptions) (result *v1beta1.ReplicaSet, err error) {
 	result = &v1beta1.ReplicaSet{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("replicasets").
 		Name(replicaSet.Name).
 		SubResource("status").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(replicaSet).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the replicaSet and deletes it. Returns an error if one occurs.
-func (c *replicaSets) Delete(name string, options *v1.DeleteOptions) error {
+func (c *replicaSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("replicasets").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *replicaSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *replicaSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("replicasets").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched replicaSet.
-func (c *replicaSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ReplicaSet, err error) {
+func (c *replicaSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ReplicaSet, err error) {
 	result = &v1beta1.ReplicaSet{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("replicasets").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // GetScale takes name of the replicaSet, and returns the corresponding v1beta1.Scale object, and an error if there is any.
-func (c *replicaSets) GetScale(replicaSetName string, options v1.GetOptions) (result *v1beta1.Scale, err error) {
+func (c *replicaSets) GetScale(ctx context.Context, replicaSetName string, options v1.GetOptions) (result *v1beta1.Scale, err error) {
 	result = &v1beta1.Scale{}
 	err = c.client.Get().
 		Namespace(c.ns).
@@ -202,21 +206,22 @@ func (c *replicaSets) GetScale(replicaSetName string, options v1.GetOptions) (re
 		Name(replicaSetName).
 		SubResource("scale").
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any.
-func (c *replicaSets) UpdateScale(replicaSetName string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) {
+func (c *replicaSets) UpdateScale(ctx context.Context, replicaSetName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (result *v1beta1.Scale, err error) {
 	result = &v1beta1.Scale{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("replicasets").
 		Name(replicaSetName).
 		SubResource("scale").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(scale).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
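// Caller-side sketch for the scale subresource: GetScale now takes a context,
// and UpdateScale additionally takes metav1.UpdateOptions. Assumes a configured
// *kubernetes.Clientset; the ReplicaSet name is hypothetical.
//
//	import (
//		"context"
//
//		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
//		"k8s.io/client-go/kubernetes"
//	)
//
//	func scaleReplicaSet(clientset *kubernetes.Clientset, replicas int32) error {
//		ctx := context.TODO()
//		rs := clientset.ExtensionsV1beta1().ReplicaSets("default")
//		scale, err := rs.GetScale(ctx, "frontend", metav1.GetOptions{})
//		if err != nil {
//			return err
//		}
//		scale.Spec.Replicas = replicas
//		_, err = rs.UpdateScale(ctx, "frontend", scale, metav1.UpdateOptions{})
//		return err
//	}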
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowschema.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowschema.go
index db71d29a..319636f7 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowschema.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/flowschema.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1alpha1
 
 import (
+	"context"
 	"time"
 
 	v1alpha1 "k8s.io/api/flowcontrol/v1alpha1"
@@ -37,15 +38,15 @@ type FlowSchemasGetter interface {
 
 // FlowSchemaInterface has methods to work with FlowSchema resources.
 type FlowSchemaInterface interface {
-	Create(*v1alpha1.FlowSchema) (*v1alpha1.FlowSchema, error)
-	Update(*v1alpha1.FlowSchema) (*v1alpha1.FlowSchema, error)
-	UpdateStatus(*v1alpha1.FlowSchema) (*v1alpha1.FlowSchema, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1alpha1.FlowSchema, error)
-	List(opts v1.ListOptions) (*v1alpha1.FlowSchemaList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.FlowSchema, err error)
+	Create(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.CreateOptions) (*v1alpha1.FlowSchema, error)
+	Update(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.UpdateOptions) (*v1alpha1.FlowSchema, error)
+	UpdateStatus(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.UpdateOptions) (*v1alpha1.FlowSchema, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.FlowSchema, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.FlowSchemaList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.FlowSchema, err error)
 	FlowSchemaExpansion
 }
 
@@ -62,19 +63,19 @@ func newFlowSchemas(c *FlowcontrolV1alpha1Client) *flowSchemas {
 }
 
 // Get takes name of the flowSchema, and returns the corresponding flowSchema object, and an error if there is any.
-func (c *flowSchemas) Get(name string, options v1.GetOptions) (result *v1alpha1.FlowSchema, err error) {
+func (c *flowSchemas) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.FlowSchema, err error) {
 	result = &v1alpha1.FlowSchema{}
 	err = c.client.Get().
 		Resource("flowschemas").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of FlowSchemas that match those selectors.
-func (c *flowSchemas) List(opts v1.ListOptions) (result *v1alpha1.FlowSchemaList, err error) {
+func (c *flowSchemas) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.FlowSchemaList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -84,13 +85,13 @@ func (c *flowSchemas) List(opts v1.ListOptions) (result *v1alpha1.FlowSchemaList
 		Resource("flowschemas").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested flowSchemas.
-func (c *flowSchemas) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *flowSchemas) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -100,81 +101,84 @@ func (c *flowSchemas) Watch(opts v1.ListOptions) (watch.Interface, error) {
 		Resource("flowschemas").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a flowSchema and creates it.  Returns the server's representation of the flowSchema, and an error, if there is any.
-func (c *flowSchemas) Create(flowSchema *v1alpha1.FlowSchema) (result *v1alpha1.FlowSchema, err error) {
+func (c *flowSchemas) Create(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.CreateOptions) (result *v1alpha1.FlowSchema, err error) {
 	result = &v1alpha1.FlowSchema{}
 	err = c.client.Post().
 		Resource("flowschemas").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(flowSchema).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a flowSchema and updates it. Returns the server's representation of the flowSchema, and an error, if there is any.
-func (c *flowSchemas) Update(flowSchema *v1alpha1.FlowSchema) (result *v1alpha1.FlowSchema, err error) {
+func (c *flowSchemas) Update(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.UpdateOptions) (result *v1alpha1.FlowSchema, err error) {
 	result = &v1alpha1.FlowSchema{}
 	err = c.client.Put().
 		Resource("flowschemas").
 		Name(flowSchema.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(flowSchema).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // UpdateStatus was generated because the type contains a Status member.
 // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-
-func (c *flowSchemas) UpdateStatus(flowSchema *v1alpha1.FlowSchema) (result *v1alpha1.FlowSchema, err error) {
+func (c *flowSchemas) UpdateStatus(ctx context.Context, flowSchema *v1alpha1.FlowSchema, opts v1.UpdateOptions) (result *v1alpha1.FlowSchema, err error) {
 	result = &v1alpha1.FlowSchema{}
 	err = c.client.Put().
 		Resource("flowschemas").
 		Name(flowSchema.Name).
 		SubResource("status").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(flowSchema).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the flowSchema and deletes it. Returns an error if one occurs.
-func (c *flowSchemas) Delete(name string, options *v1.DeleteOptions) error {
+func (c *flowSchemas) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Resource("flowschemas").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *flowSchemas) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *flowSchemas) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Resource("flowschemas").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched flowSchema.
-func (c *flowSchemas) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.FlowSchema, err error) {
+func (c *flowSchemas) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.FlowSchema, err error) {
 	result = &v1alpha1.FlowSchema{}
 	err = c.client.Patch(pt).
 		Resource("flowschemas").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
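
The hunks above show client-gen's move to context-aware signatures: every verb gains a leading context.Context, and each mutating verb takes an explicit options struct (CreateOptions, UpdateOptions, PatchOptions, and DeleteOptions by value instead of a pointer). A minimal caller-side sketch of the new convention, assuming a reachable cluster and using the FlowSchemas client from the hunk above; the kubeconfig lookup and resource name are illustrative only:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a client from the default kubeconfig location (illustrative).
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	ctx := context.TODO()

	// List now takes a context plus ListOptions, matching the diff above.
	schemas, err := clientset.FlowcontrolV1alpha1().FlowSchemas().List(ctx, metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, fs := range schemas.Items {
		fmt.Println(fs.Name)
	}

	// DeleteOptions is now passed by value rather than as a pointer.
	_ = clientset.FlowcontrolV1alpha1().FlowSchemas().Delete(ctx, "example", metav1.DeleteOptions{})
}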
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/prioritylevelconfiguration.go
index eb99cca6..1290e793 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/prioritylevelconfiguration.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/flowcontrol/v1alpha1/prioritylevelconfiguration.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1alpha1
 
 import (
+	"context"
 	"time"
 
 	v1alpha1 "k8s.io/api/flowcontrol/v1alpha1"
@@ -37,15 +38,15 @@ type PriorityLevelConfigurationsGetter interface {
 
 // PriorityLevelConfigurationInterface has methods to work with PriorityLevelConfiguration resources.
 type PriorityLevelConfigurationInterface interface {
-	Create(*v1alpha1.PriorityLevelConfiguration) (*v1alpha1.PriorityLevelConfiguration, error)
-	Update(*v1alpha1.PriorityLevelConfiguration) (*v1alpha1.PriorityLevelConfiguration, error)
-	UpdateStatus(*v1alpha1.PriorityLevelConfiguration) (*v1alpha1.PriorityLevelConfiguration, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1alpha1.PriorityLevelConfiguration, error)
-	List(opts v1.ListOptions) (*v1alpha1.PriorityLevelConfigurationList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PriorityLevelConfiguration, err error)
+	Create(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.CreateOptions) (*v1alpha1.PriorityLevelConfiguration, error)
+	Update(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1alpha1.PriorityLevelConfiguration, error)
+	UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.UpdateOptions) (*v1alpha1.PriorityLevelConfiguration, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.PriorityLevelConfiguration, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.PriorityLevelConfigurationList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PriorityLevelConfiguration, err error)
 	PriorityLevelConfigurationExpansion
 }
 
@@ -62,19 +63,19 @@ func newPriorityLevelConfigurations(c *FlowcontrolV1alpha1Client) *priorityLevel
 }
 
 // Get takes name of the priorityLevelConfiguration, and returns the corresponding priorityLevelConfiguration object, and an error if there is any.
-func (c *priorityLevelConfigurations) Get(name string, options v1.GetOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) {
+func (c *priorityLevelConfigurations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) {
 	result = &v1alpha1.PriorityLevelConfiguration{}
 	err = c.client.Get().
 		Resource("prioritylevelconfigurations").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of PriorityLevelConfigurations that match those selectors.
-func (c *priorityLevelConfigurations) List(opts v1.ListOptions) (result *v1alpha1.PriorityLevelConfigurationList, err error) {
+func (c *priorityLevelConfigurations) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PriorityLevelConfigurationList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -84,13 +85,13 @@ func (c *priorityLevelConfigurations) List(opts v1.ListOptions) (result *v1alpha
 		Resource("prioritylevelconfigurations").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested priorityLevelConfigurations.
-func (c *priorityLevelConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *priorityLevelConfigurations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -100,81 +101,84 @@ func (c *priorityLevelConfigurations) Watch(opts v1.ListOptions) (watch.Interfac
 		Resource("prioritylevelconfigurations").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a priorityLevelConfiguration and creates it.  Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any.
-func (c *priorityLevelConfigurations) Create(priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration) (result *v1alpha1.PriorityLevelConfiguration, err error) {
+func (c *priorityLevelConfigurations) Create(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.CreateOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) {
 	result = &v1alpha1.PriorityLevelConfiguration{}
 	err = c.client.Post().
 		Resource("prioritylevelconfigurations").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(priorityLevelConfiguration).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a priorityLevelConfiguration and updates it. Returns the server's representation of the priorityLevelConfiguration, and an error, if there is any.
-func (c *priorityLevelConfigurations) Update(priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration) (result *v1alpha1.PriorityLevelConfiguration, err error) {
+func (c *priorityLevelConfigurations) Update(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) {
 	result = &v1alpha1.PriorityLevelConfiguration{}
 	err = c.client.Put().
 		Resource("prioritylevelconfigurations").
 		Name(priorityLevelConfiguration.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(priorityLevelConfiguration).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // UpdateStatus was generated because the type contains a Status member.
 // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-
-func (c *priorityLevelConfigurations) UpdateStatus(priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration) (result *v1alpha1.PriorityLevelConfiguration, err error) {
+func (c *priorityLevelConfigurations) UpdateStatus(ctx context.Context, priorityLevelConfiguration *v1alpha1.PriorityLevelConfiguration, opts v1.UpdateOptions) (result *v1alpha1.PriorityLevelConfiguration, err error) {
 	result = &v1alpha1.PriorityLevelConfiguration{}
 	err = c.client.Put().
 		Resource("prioritylevelconfigurations").
 		Name(priorityLevelConfiguration.Name).
 		SubResource("status").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(priorityLevelConfiguration).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the priorityLevelConfiguration and deletes it. Returns an error if one occurs.
-func (c *priorityLevelConfigurations) Delete(name string, options *v1.DeleteOptions) error {
+func (c *priorityLevelConfigurations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Resource("prioritylevelconfigurations").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *priorityLevelConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *priorityLevelConfigurations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Resource("prioritylevelconfigurations").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched priorityLevelConfiguration.
-func (c *priorityLevelConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PriorityLevelConfiguration, err error) {
+func (c *priorityLevelConfigurations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PriorityLevelConfiguration, err error) {
 	result = &v1alpha1.PriorityLevelConfiguration{}
 	err = c.client.Patch(pt).
 		Resource("prioritylevelconfigurations").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go
index 3f39be95..19c0c880 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1
 
 import (
+	"context"
 	"time"
 
 	v1 "k8s.io/api/networking/v1"
@@ -37,14 +38,14 @@ type NetworkPoliciesGetter interface {
 
 // NetworkPolicyInterface has methods to work with NetworkPolicy resources.
 type NetworkPolicyInterface interface {
-	Create(*v1.NetworkPolicy) (*v1.NetworkPolicy, error)
-	Update(*v1.NetworkPolicy) (*v1.NetworkPolicy, error)
-	Delete(name string, options *metav1.DeleteOptions) error
-	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
-	Get(name string, options metav1.GetOptions) (*v1.NetworkPolicy, error)
-	List(opts metav1.ListOptions) (*v1.NetworkPolicyList, error)
-	Watch(opts metav1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.NetworkPolicy, err error)
+	Create(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.CreateOptions) (*v1.NetworkPolicy, error)
+	Update(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.UpdateOptions) (*v1.NetworkPolicy, error)
+	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.NetworkPolicy, error)
+	List(ctx context.Context, opts metav1.ListOptions) (*v1.NetworkPolicyList, error)
+	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.NetworkPolicy, err error)
 	NetworkPolicyExpansion
 }
 
@@ -63,20 +64,20 @@ func newNetworkPolicies(c *NetworkingV1Client, namespace string) *networkPolicie
 }
 
 // Get takes name of the networkPolicy, and returns the corresponding networkPolicy object, and an error if there is any.
-func (c *networkPolicies) Get(name string, options metav1.GetOptions) (result *v1.NetworkPolicy, err error) {
+func (c *networkPolicies) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.NetworkPolicy, err error) {
 	result = &v1.NetworkPolicy{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("networkpolicies").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors.
-func (c *networkPolicies) List(opts metav1.ListOptions) (result *v1.NetworkPolicyList, err error) {
+func (c *networkPolicies) List(ctx context.Context, opts metav1.ListOptions) (result *v1.NetworkPolicyList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -87,13 +88,13 @@ func (c *networkPolicies) List(opts metav1.ListOptions) (result *v1.NetworkPolic
 		Resource("networkpolicies").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested networkPolicies.
-func (c *networkPolicies) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+func (c *networkPolicies) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -104,71 +105,74 @@ func (c *networkPolicies) Watch(opts metav1.ListOptions) (watch.Interface, error
 		Resource("networkpolicies").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a networkPolicy and creates it.  Returns the server's representation of the networkPolicy, and an error, if there is any.
-func (c *networkPolicies) Create(networkPolicy *v1.NetworkPolicy) (result *v1.NetworkPolicy, err error) {
+func (c *networkPolicies) Create(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.CreateOptions) (result *v1.NetworkPolicy, err error) {
 	result = &v1.NetworkPolicy{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("networkpolicies").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(networkPolicy).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a networkPolicy and updates it. Returns the server's representation of the networkPolicy, and an error, if there is any.
-func (c *networkPolicies) Update(networkPolicy *v1.NetworkPolicy) (result *v1.NetworkPolicy, err error) {
+func (c *networkPolicies) Update(ctx context.Context, networkPolicy *v1.NetworkPolicy, opts metav1.UpdateOptions) (result *v1.NetworkPolicy, err error) {
 	result = &v1.NetworkPolicy{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("networkpolicies").
 		Name(networkPolicy.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(networkPolicy).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the networkPolicy and deletes it. Returns an error if one occurs.
-func (c *networkPolicies) Delete(name string, options *metav1.DeleteOptions) error {
+func (c *networkPolicies) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("networkpolicies").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *networkPolicies) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+func (c *networkPolicies) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("networkpolicies").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched networkPolicy.
-func (c *networkPolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.NetworkPolicy, err error) {
+func (c *networkPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.NetworkPolicy, err error) {
 	result = &v1.NetworkPolicy{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("networkpolicies").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
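
Patch additionally gains a PatchOptions argument ahead of the subresources variadic, and the Name/SubResource ordering in the request builder is normalized. A short sketch of a strategic-merge patch against a namespaced NetworkPolicy; the helper name and label key are assumptions for illustration:

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
)

// labelNetworkPolicy strategic-merge-patches a label onto one NetworkPolicy.
func labelNetworkPolicy(ctx context.Context, cs kubernetes.Interface, namespace, name string) error {
	patch := []byte(`{"metadata":{"labels":{"audited":"true"}}}`)
	_, err := cs.NetworkingV1().NetworkPolicies(namespace).Patch(
		ctx, name, types.StrategicMergePatchType, patch, metav1.PatchOptions{})
	return err
}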
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/generated_expansion.go
index 1442649b..f74c7257 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/generated_expansion.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/generated_expansion.go
@@ -19,3 +19,5 @@ limitations under the License.
 package v1beta1
 
 type IngressExpansion interface{}
+
+type IngressClassExpansion interface{}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go
index 8d76678f..0857c05d 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1beta1
 
 import (
+	"context"
 	"time"
 
 	v1beta1 "k8s.io/api/networking/v1beta1"
@@ -37,15 +38,15 @@ type IngressesGetter interface {
 
 // IngressInterface has methods to work with Ingress resources.
 type IngressInterface interface {
-	Create(*v1beta1.Ingress) (*v1beta1.Ingress, error)
-	Update(*v1beta1.Ingress) (*v1beta1.Ingress, error)
-	UpdateStatus(*v1beta1.Ingress) (*v1beta1.Ingress, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1beta1.Ingress, error)
-	List(opts v1.ListOptions) (*v1beta1.IngressList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Ingress, err error)
+	Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (*v1beta1.Ingress, error)
+	Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error)
+	UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (*v1beta1.Ingress, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Ingress, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1beta1.IngressList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error)
 	IngressExpansion
 }
 
@@ -64,20 +65,20 @@ func newIngresses(c *NetworkingV1beta1Client, namespace string) *ingresses {
 }
 
 // Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any.
-func (c *ingresses) Get(name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) {
+func (c *ingresses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) {
 	result = &v1beta1.Ingress{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("ingresses").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of Ingresses that match those selectors.
-func (c *ingresses) List(opts v1.ListOptions) (result *v1beta1.IngressList, err error) {
+func (c *ingresses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.IngressList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -88,13 +89,13 @@ func (c *ingresses) List(opts v1.ListOptions) (result *v1beta1.IngressList, err
 		Resource("ingresses").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested ingresses.
-func (c *ingresses) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *ingresses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -105,87 +106,90 @@ func (c *ingresses) Watch(opts v1.ListOptions) (watch.Interface, error) {
 		Resource("ingresses").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a ingress and creates it.  Returns the server's representation of the ingress, and an error, if there is any.
-func (c *ingresses) Create(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) {
+func (c *ingresses) Create(ctx context.Context, ingress *v1beta1.Ingress, opts v1.CreateOptions) (result *v1beta1.Ingress, err error) {
 	result = &v1beta1.Ingress{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("ingresses").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(ingress).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any.
-func (c *ingresses) Update(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) {
+func (c *ingresses) Update(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) {
 	result = &v1beta1.Ingress{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("ingresses").
 		Name(ingress.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(ingress).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // UpdateStatus was generated because the type contains a Status member.
 // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-
-func (c *ingresses) UpdateStatus(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) {
+func (c *ingresses) UpdateStatus(ctx context.Context, ingress *v1beta1.Ingress, opts v1.UpdateOptions) (result *v1beta1.Ingress, err error) {
 	result = &v1beta1.Ingress{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("ingresses").
 		Name(ingress.Name).
 		SubResource("status").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(ingress).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the ingress and deletes it. Returns an error if one occurs.
-func (c *ingresses) Delete(name string, options *v1.DeleteOptions) error {
+func (c *ingresses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("ingresses").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *ingresses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *ingresses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("ingresses").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched ingress.
-func (c *ingresses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Ingress, err error) {
+func (c *ingresses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Ingress, err error) {
 	result = &v1beta1.Ingress{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("ingresses").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingressclass.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingressclass.go
new file mode 100644
index 00000000..2a423742
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingressclass.go
@@ -0,0 +1,168 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	"context"
+	"time"
+
+	v1beta1 "k8s.io/api/networking/v1beta1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// IngressClassesGetter has a method to return a IngressClassInterface.
+// A group's client should implement this interface.
+type IngressClassesGetter interface {
+	IngressClasses() IngressClassInterface
+}
+
+// IngressClassInterface has methods to work with IngressClass resources.
+type IngressClassInterface interface {
+	Create(ctx context.Context, ingressClass *v1beta1.IngressClass, opts v1.CreateOptions) (*v1beta1.IngressClass, error)
+	Update(ctx context.Context, ingressClass *v1beta1.IngressClass, opts v1.UpdateOptions) (*v1beta1.IngressClass, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.IngressClass, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1beta1.IngressClassList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.IngressClass, err error)
+	IngressClassExpansion
+}
+
+// ingressClasses implements IngressClassInterface
+type ingressClasses struct {
+	client rest.Interface
+}
+
+// newIngressClasses returns a IngressClasses
+func newIngressClasses(c *NetworkingV1beta1Client) *ingressClasses {
+	return &ingressClasses{
+		client: c.RESTClient(),
+	}
+}
+
+// Get takes name of the ingressClass, and returns the corresponding ingressClass object, and an error if there is any.
+func (c *ingressClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.IngressClass, err error) {
+	result = &v1beta1.IngressClass{}
+	err = c.client.Get().
+		Resource("ingressclasses").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do(ctx).
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of IngressClasses that match those selectors.
+func (c *ingressClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.IngressClassList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1beta1.IngressClassList{}
+	err = c.client.Get().
+		Resource("ingressclasses").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do(ctx).
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested ingressClasses.
+func (c *ingressClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Resource("ingressclasses").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch(ctx)
+}
+
+// Create takes the representation of a ingressClass and creates it.  Returns the server's representation of the ingressClass, and an error, if there is any.
+func (c *ingressClasses) Create(ctx context.Context, ingressClass *v1beta1.IngressClass, opts v1.CreateOptions) (result *v1beta1.IngressClass, err error) {
+	result = &v1beta1.IngressClass{}
+	err = c.client.Post().
+		Resource("ingressclasses").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Body(ingressClass).
+		Do(ctx).
+		Into(result)
+	return
+}
+
+// Update takes the representation of a ingressClass and updates it. Returns the server's representation of the ingressClass, and an error, if there is any.
+func (c *ingressClasses) Update(ctx context.Context, ingressClass *v1beta1.IngressClass, opts v1.UpdateOptions) (result *v1beta1.IngressClass, err error) {
+	result = &v1beta1.IngressClass{}
+	err = c.client.Put().
+		Resource("ingressclasses").
+		Name(ingressClass.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Body(ingressClass).
+		Do(ctx).
+		Into(result)
+	return
+}
+
+// Delete takes name of the ingressClass and deletes it. Returns an error if one occurs.
+func (c *ingressClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+	return c.client.Delete().
+		Resource("ingressclasses").
+		Name(name).
+		Body(&opts).
+		Do(ctx).
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *ingressClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+	var timeout time.Duration
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Resource("ingressclasses").
+		VersionedParams(&listOpts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(&opts).
+		Do(ctx).
+		Error()
+}
+
+// Patch applies the patch and returns the patched ingressClass.
+func (c *ingressClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.IngressClass, err error) {
+	result = &v1beta1.IngressClass{}
+	err = c.client.Patch(pt).
+		Resource("ingressclasses").
+		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Body(data).
+		Do(ctx).
+		Into(result)
+	return
+}
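
ingressclass.go is new in this client-go version and wires the networking/v1beta1 IngressClass resource into the typed client, following the same context-aware pattern. A hedged usage sketch, assuming an initialized kubernetes.Interface; the default-class annotation key follows the upstream convention:

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// defaultIngressClass returns the name of the cluster's default IngressClass,
// or an empty string if none is marked as default.
func defaultIngressClass(ctx context.Context, cs kubernetes.Interface) (string, error) {
	classes, err := cs.NetworkingV1beta1().IngressClasses().List(ctx, metav1.ListOptions{})
	if err != nil {
		return "", err
	}
	for _, ic := range classes.Items {
		if ic.Annotations["ingressclass.kubernetes.io/is-default-class"] == "true" {
			return ic.Name, nil
		}
	}
	return "", nil
}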
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go
index ee523f8e..849ac219 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go
@@ -27,6 +27,7 @@ import (
 type NetworkingV1beta1Interface interface {
 	RESTClient() rest.Interface
 	IngressesGetter
+	IngressClassesGetter
 }
 
 // NetworkingV1beta1Client is used to interact with features provided by the networking.k8s.io group.
@@ -38,6 +39,10 @@ func (c *NetworkingV1beta1Client) Ingresses(namespace string) IngressInterface {
 	return newIngresses(c, namespace)
 }
 
+func (c *NetworkingV1beta1Client) IngressClasses() IngressClassInterface {
+	return newIngressClasses(c)
+}
+
 // NewForConfig creates a new NetworkingV1beta1Client for the given config.
 func NewForConfig(c *rest.Config) (*NetworkingV1beta1Client, error) {
 	config := *c
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go
index 044460ec..402c23e8 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1alpha1
 
 import (
+	"context"
 	"time"
 
 	v1alpha1 "k8s.io/api/node/v1alpha1"
@@ -37,14 +38,14 @@ type RuntimeClassesGetter interface {
 
 // RuntimeClassInterface has methods to work with RuntimeClass resources.
 type RuntimeClassInterface interface {
-	Create(*v1alpha1.RuntimeClass) (*v1alpha1.RuntimeClass, error)
-	Update(*v1alpha1.RuntimeClass) (*v1alpha1.RuntimeClass, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1alpha1.RuntimeClass, error)
-	List(opts v1.ListOptions) (*v1alpha1.RuntimeClassList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.RuntimeClass, err error)
+	Create(ctx context.Context, runtimeClass *v1alpha1.RuntimeClass, opts v1.CreateOptions) (*v1alpha1.RuntimeClass, error)
+	Update(ctx context.Context, runtimeClass *v1alpha1.RuntimeClass, opts v1.UpdateOptions) (*v1alpha1.RuntimeClass, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.RuntimeClass, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.RuntimeClassList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RuntimeClass, err error)
 	RuntimeClassExpansion
 }
 
@@ -61,19 +62,19 @@ func newRuntimeClasses(c *NodeV1alpha1Client) *runtimeClasses {
 }
 
 // Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any.
-func (c *runtimeClasses) Get(name string, options v1.GetOptions) (result *v1alpha1.RuntimeClass, err error) {
+func (c *runtimeClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.RuntimeClass, err error) {
 	result = &v1alpha1.RuntimeClass{}
 	err = c.client.Get().
 		Resource("runtimeclasses").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors.
-func (c *runtimeClasses) List(opts v1.ListOptions) (result *v1alpha1.RuntimeClassList, err error) {
+func (c *runtimeClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RuntimeClassList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -83,13 +84,13 @@ func (c *runtimeClasses) List(opts v1.ListOptions) (result *v1alpha1.RuntimeClas
 		Resource("runtimeclasses").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested runtimeClasses.
-func (c *runtimeClasses) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *runtimeClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -99,66 +100,69 @@ func (c *runtimeClasses) Watch(opts v1.ListOptions) (watch.Interface, error) {
 		Resource("runtimeclasses").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a runtimeClass and creates it.  Returns the server's representation of the runtimeClass, and an error, if there is any.
-func (c *runtimeClasses) Create(runtimeClass *v1alpha1.RuntimeClass) (result *v1alpha1.RuntimeClass, err error) {
+func (c *runtimeClasses) Create(ctx context.Context, runtimeClass *v1alpha1.RuntimeClass, opts v1.CreateOptions) (result *v1alpha1.RuntimeClass, err error) {
 	result = &v1alpha1.RuntimeClass{}
 	err = c.client.Post().
 		Resource("runtimeclasses").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(runtimeClass).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any.
-func (c *runtimeClasses) Update(runtimeClass *v1alpha1.RuntimeClass) (result *v1alpha1.RuntimeClass, err error) {
+func (c *runtimeClasses) Update(ctx context.Context, runtimeClass *v1alpha1.RuntimeClass, opts v1.UpdateOptions) (result *v1alpha1.RuntimeClass, err error) {
 	result = &v1alpha1.RuntimeClass{}
 	err = c.client.Put().
 		Resource("runtimeclasses").
 		Name(runtimeClass.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(runtimeClass).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs.
-func (c *runtimeClasses) Delete(name string, options *v1.DeleteOptions) error {
+func (c *runtimeClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Resource("runtimeclasses").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *runtimeClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *runtimeClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Resource("runtimeclasses").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched runtimeClass.
-func (c *runtimeClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.RuntimeClass, err error) {
+func (c *runtimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RuntimeClass, err error) {
 	result = &v1alpha1.RuntimeClass{}
 	err = c.client.Patch(pt).
 		Resource("runtimeclasses").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go
index b3f7c497..b0d1886e 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1beta1
 
 import (
+	"context"
 	"time"
 
 	v1beta1 "k8s.io/api/node/v1beta1"
@@ -37,14 +38,14 @@ type RuntimeClassesGetter interface {
 
 // RuntimeClassInterface has methods to work with RuntimeClass resources.
 type RuntimeClassInterface interface {
-	Create(*v1beta1.RuntimeClass) (*v1beta1.RuntimeClass, error)
-	Update(*v1beta1.RuntimeClass) (*v1beta1.RuntimeClass, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1beta1.RuntimeClass, error)
-	List(opts v1.ListOptions) (*v1beta1.RuntimeClassList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RuntimeClass, err error)
+	Create(ctx context.Context, runtimeClass *v1beta1.RuntimeClass, opts v1.CreateOptions) (*v1beta1.RuntimeClass, error)
+	Update(ctx context.Context, runtimeClass *v1beta1.RuntimeClass, opts v1.UpdateOptions) (*v1beta1.RuntimeClass, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.RuntimeClass, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1beta1.RuntimeClassList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.RuntimeClass, err error)
 	RuntimeClassExpansion
 }
 
@@ -61,19 +62,19 @@ func newRuntimeClasses(c *NodeV1beta1Client) *runtimeClasses {
 }
 
 // Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any.
-func (c *runtimeClasses) Get(name string, options v1.GetOptions) (result *v1beta1.RuntimeClass, err error) {
+func (c *runtimeClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.RuntimeClass, err error) {
 	result = &v1beta1.RuntimeClass{}
 	err = c.client.Get().
 		Resource("runtimeclasses").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors.
-func (c *runtimeClasses) List(opts v1.ListOptions) (result *v1beta1.RuntimeClassList, err error) {
+func (c *runtimeClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.RuntimeClassList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -83,13 +84,13 @@ func (c *runtimeClasses) List(opts v1.ListOptions) (result *v1beta1.RuntimeClass
 		Resource("runtimeclasses").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested runtimeClasses.
-func (c *runtimeClasses) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *runtimeClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -99,66 +100,69 @@ func (c *runtimeClasses) Watch(opts v1.ListOptions) (watch.Interface, error) {
 		Resource("runtimeclasses").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a runtimeClass and creates it.  Returns the server's representation of the runtimeClass, and an error, if there is any.
-func (c *runtimeClasses) Create(runtimeClass *v1beta1.RuntimeClass) (result *v1beta1.RuntimeClass, err error) {
+func (c *runtimeClasses) Create(ctx context.Context, runtimeClass *v1beta1.RuntimeClass, opts v1.CreateOptions) (result *v1beta1.RuntimeClass, err error) {
 	result = &v1beta1.RuntimeClass{}
 	err = c.client.Post().
 		Resource("runtimeclasses").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(runtimeClass).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any.
-func (c *runtimeClasses) Update(runtimeClass *v1beta1.RuntimeClass) (result *v1beta1.RuntimeClass, err error) {
+func (c *runtimeClasses) Update(ctx context.Context, runtimeClass *v1beta1.RuntimeClass, opts v1.UpdateOptions) (result *v1beta1.RuntimeClass, err error) {
 	result = &v1beta1.RuntimeClass{}
 	err = c.client.Put().
 		Resource("runtimeclasses").
 		Name(runtimeClass.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(runtimeClass).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs.
-func (c *runtimeClasses) Delete(name string, options *v1.DeleteOptions) error {
+func (c *runtimeClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Resource("runtimeclasses").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *runtimeClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *runtimeClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Resource("runtimeclasses").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched runtimeClass.
-func (c *runtimeClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RuntimeClass, err error) {
+func (c *runtimeClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.RuntimeClass, err error) {
 	result = &v1beta1.RuntimeClass{}
 	err = c.client.Patch(pt).
 		Resource("runtimeclasses").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go
index 40bad265..c003671f 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go
@@ -17,15 +17,17 @@ limitations under the License.
 package v1beta1
 
 import (
+	"context"
+
 	policy "k8s.io/api/policy/v1beta1"
 )
 
 // The EvictionExpansion interface allows manually adding extra methods to the ScaleInterface.
 type EvictionExpansion interface {
-	Evict(eviction *policy.Eviction) error
+	Evict(ctx context.Context, eviction *policy.Eviction) error
 }
 
-func (c *evictions) Evict(eviction *policy.Eviction) error {
+func (c *evictions) Evict(ctx context.Context, eviction *policy.Eviction) error {
 	return c.client.Post().
 		AbsPath("/api/v1").
 		Namespace(eviction.Namespace).
@@ -33,6 +35,6 @@ func (c *evictions) Evict(eviction *policy.Eviction) error {
 		Name(eviction.Name).
 		SubResource("eviction").
 		Body(eviction).
-		Do().
+		Do(ctx).
 		Error()
 }
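
Evict, the hand-written expansion on the policy/v1beta1 evictions client, picks up the same context parameter. A minimal sketch of evicting a single pod; the function name, pod name, and namespace are placeholders:

import (
	"context"

	policyv1beta1 "k8s.io/api/policy/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// evictPod asks the API server to evict one pod via the eviction subresource.
func evictPod(ctx context.Context, cs kubernetes.Interface, namespace, pod string) error {
	eviction := &policyv1beta1.Eviction{
		ObjectMeta: metav1.ObjectMeta{Name: pod, Namespace: namespace},
	}
	return cs.PolicyV1beta1().Evictions(namespace).Evict(ctx, eviction)
}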
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go
index 864af9a2..95b7ff1b 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1beta1
 
 import (
+	"context"
 	"time"
 
 	v1beta1 "k8s.io/api/policy/v1beta1"
@@ -37,15 +38,15 @@ type PodDisruptionBudgetsGetter interface {
 
 // PodDisruptionBudgetInterface has methods to work with PodDisruptionBudget resources.
 type PodDisruptionBudgetInterface interface {
-	Create(*v1beta1.PodDisruptionBudget) (*v1beta1.PodDisruptionBudget, error)
-	Update(*v1beta1.PodDisruptionBudget) (*v1beta1.PodDisruptionBudget, error)
-	UpdateStatus(*v1beta1.PodDisruptionBudget) (*v1beta1.PodDisruptionBudget, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1beta1.PodDisruptionBudget, error)
-	List(opts v1.ListOptions) (*v1beta1.PodDisruptionBudgetList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error)
+	Create(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.CreateOptions) (*v1beta1.PodDisruptionBudget, error)
+	Update(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (*v1beta1.PodDisruptionBudget, error)
+	UpdateStatus(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (*v1beta1.PodDisruptionBudget, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.PodDisruptionBudget, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1beta1.PodDisruptionBudgetList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error)
 	PodDisruptionBudgetExpansion
 }
 
@@ -64,20 +65,20 @@ func newPodDisruptionBudgets(c *PolicyV1beta1Client, namespace string) *podDisru
 }
 
 // Get takes name of the podDisruptionBudget, and returns the corresponding podDisruptionBudget object, and an error if there is any.
-func (c *podDisruptionBudgets) Get(name string, options v1.GetOptions) (result *v1beta1.PodDisruptionBudget, err error) {
+func (c *podDisruptionBudgets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PodDisruptionBudget, err error) {
 	result = &v1beta1.PodDisruptionBudget{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("poddisruptionbudgets").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of PodDisruptionBudgets that match those selectors.
-func (c *podDisruptionBudgets) List(opts v1.ListOptions) (result *v1beta1.PodDisruptionBudgetList, err error) {
+func (c *podDisruptionBudgets) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PodDisruptionBudgetList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -88,13 +89,13 @@ func (c *podDisruptionBudgets) List(opts v1.ListOptions) (result *v1beta1.PodDis
 		Resource("poddisruptionbudgets").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested podDisruptionBudgets.
-func (c *podDisruptionBudgets) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *podDisruptionBudgets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -105,87 +106,90 @@ func (c *podDisruptionBudgets) Watch(opts v1.ListOptions) (watch.Interface, erro
 		Resource("poddisruptionbudgets").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a podDisruptionBudget and creates it.  Returns the server's representation of the podDisruptionBudget, and an error, if there is any.
-func (c *podDisruptionBudgets) Create(podDisruptionBudget *v1beta1.PodDisruptionBudget) (result *v1beta1.PodDisruptionBudget, err error) {
+func (c *podDisruptionBudgets) Create(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.CreateOptions) (result *v1beta1.PodDisruptionBudget, err error) {
 	result = &v1beta1.PodDisruptionBudget{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("poddisruptionbudgets").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(podDisruptionBudget).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a podDisruptionBudget and updates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any.
-func (c *podDisruptionBudgets) Update(podDisruptionBudget *v1beta1.PodDisruptionBudget) (result *v1beta1.PodDisruptionBudget, err error) {
+func (c *podDisruptionBudgets) Update(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (result *v1beta1.PodDisruptionBudget, err error) {
 	result = &v1beta1.PodDisruptionBudget{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("poddisruptionbudgets").
 		Name(podDisruptionBudget.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(podDisruptionBudget).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // UpdateStatus was generated because the type contains a Status member.
 // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-
-func (c *podDisruptionBudgets) UpdateStatus(podDisruptionBudget *v1beta1.PodDisruptionBudget) (result *v1beta1.PodDisruptionBudget, err error) {
+func (c *podDisruptionBudgets) UpdateStatus(ctx context.Context, podDisruptionBudget *v1beta1.PodDisruptionBudget, opts v1.UpdateOptions) (result *v1beta1.PodDisruptionBudget, err error) {
 	result = &v1beta1.PodDisruptionBudget{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("poddisruptionbudgets").
 		Name(podDisruptionBudget.Name).
 		SubResource("status").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(podDisruptionBudget).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the podDisruptionBudget and deletes it. Returns an error if one occurs.
-func (c *podDisruptionBudgets) Delete(name string, options *v1.DeleteOptions) error {
+func (c *podDisruptionBudgets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("poddisruptionbudgets").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *podDisruptionBudgets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *podDisruptionBudgets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("poddisruptionbudgets").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched podDisruptionBudget.
-func (c *podDisruptionBudgets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error) {
+func (c *podDisruptionBudgets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error) {
 	result = &v1beta1.PodDisruptionBudget{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("poddisruptionbudgets").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go
index d02096d7..15d7bb9e 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1beta1
 
 import (
+	"context"
 	"time"
 
 	v1beta1 "k8s.io/api/policy/v1beta1"
@@ -37,14 +38,14 @@ type PodSecurityPoliciesGetter interface {
 
 // PodSecurityPolicyInterface has methods to work with PodSecurityPolicy resources.
 type PodSecurityPolicyInterface interface {
-	Create(*v1beta1.PodSecurityPolicy) (*v1beta1.PodSecurityPolicy, error)
-	Update(*v1beta1.PodSecurityPolicy) (*v1beta1.PodSecurityPolicy, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1beta1.PodSecurityPolicy, error)
-	List(opts v1.ListOptions) (*v1beta1.PodSecurityPolicyList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error)
+	Create(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.CreateOptions) (*v1beta1.PodSecurityPolicy, error)
+	Update(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.UpdateOptions) (*v1beta1.PodSecurityPolicy, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.PodSecurityPolicy, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1beta1.PodSecurityPolicyList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error)
 	PodSecurityPolicyExpansion
 }
 
@@ -61,19 +62,19 @@ func newPodSecurityPolicies(c *PolicyV1beta1Client) *podSecurityPolicies {
 }
 
 // Get takes name of the podSecurityPolicy, and returns the corresponding podSecurityPolicy object, and an error if there is any.
-func (c *podSecurityPolicies) Get(name string, options v1.GetOptions) (result *v1beta1.PodSecurityPolicy, err error) {
+func (c *podSecurityPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PodSecurityPolicy, err error) {
 	result = &v1beta1.PodSecurityPolicy{}
 	err = c.client.Get().
 		Resource("podsecuritypolicies").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors.
-func (c *podSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) {
+func (c *podSecurityPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -83,13 +84,13 @@ func (c *podSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.PodSecu
 		Resource("podsecuritypolicies").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested podSecurityPolicies.
-func (c *podSecurityPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *podSecurityPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -99,66 +100,69 @@ func (c *podSecurityPolicies) Watch(opts v1.ListOptions) (watch.Interface, error
 		Resource("podsecuritypolicies").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a podSecurityPolicy and creates it.  Returns the server's representation of the podSecurityPolicy, and an error, if there is any.
-func (c *podSecurityPolicies) Create(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) {
+func (c *podSecurityPolicies) Create(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.CreateOptions) (result *v1beta1.PodSecurityPolicy, err error) {
 	result = &v1beta1.PodSecurityPolicy{}
 	err = c.client.Post().
 		Resource("podsecuritypolicies").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(podSecurityPolicy).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a podSecurityPolicy and updates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any.
-func (c *podSecurityPolicies) Update(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) {
+func (c *podSecurityPolicies) Update(ctx context.Context, podSecurityPolicy *v1beta1.PodSecurityPolicy, opts v1.UpdateOptions) (result *v1beta1.PodSecurityPolicy, err error) {
 	result = &v1beta1.PodSecurityPolicy{}
 	err = c.client.Put().
 		Resource("podsecuritypolicies").
 		Name(podSecurityPolicy.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(podSecurityPolicy).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the podSecurityPolicy and deletes it. Returns an error if one occurs.
-func (c *podSecurityPolicies) Delete(name string, options *v1.DeleteOptions) error {
+func (c *podSecurityPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Resource("podsecuritypolicies").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *podSecurityPolicies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *podSecurityPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Resource("podsecuritypolicies").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched podSecurityPolicy.
-func (c *podSecurityPolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) {
+func (c *podSecurityPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) {
 	result = &v1beta1.PodSecurityPolicy{}
 	err = c.client.Patch(pt).
 		Resource("podsecuritypolicies").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go
index 0a47c441..787324d6 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1
 
 import (
+	"context"
 	"time"
 
 	v1 "k8s.io/api/rbac/v1"
@@ -37,14 +38,14 @@ type ClusterRolesGetter interface {
 
 // ClusterRoleInterface has methods to work with ClusterRole resources.
 type ClusterRoleInterface interface {
-	Create(*v1.ClusterRole) (*v1.ClusterRole, error)
-	Update(*v1.ClusterRole) (*v1.ClusterRole, error)
-	Delete(name string, options *metav1.DeleteOptions) error
-	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
-	Get(name string, options metav1.GetOptions) (*v1.ClusterRole, error)
-	List(opts metav1.ListOptions) (*v1.ClusterRoleList, error)
-	Watch(opts metav1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ClusterRole, err error)
+	Create(ctx context.Context, clusterRole *v1.ClusterRole, opts metav1.CreateOptions) (*v1.ClusterRole, error)
+	Update(ctx context.Context, clusterRole *v1.ClusterRole, opts metav1.UpdateOptions) (*v1.ClusterRole, error)
+	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ClusterRole, error)
+	List(ctx context.Context, opts metav1.ListOptions) (*v1.ClusterRoleList, error)
+	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterRole, err error)
 	ClusterRoleExpansion
 }
 
@@ -61,19 +62,19 @@ func newClusterRoles(c *RbacV1Client) *clusterRoles {
 }
 
 // Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any.
-func (c *clusterRoles) Get(name string, options metav1.GetOptions) (result *v1.ClusterRole, err error) {
+func (c *clusterRoles) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterRole, err error) {
 	result = &v1.ClusterRole{}
 	err = c.client.Get().
 		Resource("clusterroles").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of ClusterRoles that match those selectors.
-func (c *clusterRoles) List(opts metav1.ListOptions) (result *v1.ClusterRoleList, err error) {
+func (c *clusterRoles) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterRoleList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -83,13 +84,13 @@ func (c *clusterRoles) List(opts metav1.ListOptions) (result *v1.ClusterRoleList
 		Resource("clusterroles").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested clusterRoles.
-func (c *clusterRoles) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+func (c *clusterRoles) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -99,66 +100,69 @@ func (c *clusterRoles) Watch(opts metav1.ListOptions) (watch.Interface, error) {
 		Resource("clusterroles").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a clusterRole and creates it.  Returns the server's representation of the clusterRole, and an error, if there is any.
-func (c *clusterRoles) Create(clusterRole *v1.ClusterRole) (result *v1.ClusterRole, err error) {
+func (c *clusterRoles) Create(ctx context.Context, clusterRole *v1.ClusterRole, opts metav1.CreateOptions) (result *v1.ClusterRole, err error) {
 	result = &v1.ClusterRole{}
 	err = c.client.Post().
 		Resource("clusterroles").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(clusterRole).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any.
-func (c *clusterRoles) Update(clusterRole *v1.ClusterRole) (result *v1.ClusterRole, err error) {
+func (c *clusterRoles) Update(ctx context.Context, clusterRole *v1.ClusterRole, opts metav1.UpdateOptions) (result *v1.ClusterRole, err error) {
 	result = &v1.ClusterRole{}
 	err = c.client.Put().
 		Resource("clusterroles").
 		Name(clusterRole.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(clusterRole).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the clusterRole and deletes it. Returns an error if one occurs.
-func (c *clusterRoles) Delete(name string, options *metav1.DeleteOptions) error {
+func (c *clusterRoles) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
 	return c.client.Delete().
 		Resource("clusterroles").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *clusterRoles) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+func (c *clusterRoles) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Resource("clusterroles").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched clusterRole.
-func (c *clusterRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ClusterRole, err error) {
+func (c *clusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterRole, err error) {
 	result = &v1.ClusterRole{}
 	err = c.client.Patch(pt).
 		Resource("clusterroles").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go
index c16ebc31..83e8c81b 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1
 
 import (
+	"context"
 	"time"
 
 	v1 "k8s.io/api/rbac/v1"
@@ -37,14 +38,14 @@ type ClusterRoleBindingsGetter interface {
 
 // ClusterRoleBindingInterface has methods to work with ClusterRoleBinding resources.
 type ClusterRoleBindingInterface interface {
-	Create(*v1.ClusterRoleBinding) (*v1.ClusterRoleBinding, error)
-	Update(*v1.ClusterRoleBinding) (*v1.ClusterRoleBinding, error)
-	Delete(name string, options *metav1.DeleteOptions) error
-	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
-	Get(name string, options metav1.GetOptions) (*v1.ClusterRoleBinding, error)
-	List(opts metav1.ListOptions) (*v1.ClusterRoleBindingList, error)
-	Watch(opts metav1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ClusterRoleBinding, err error)
+	Create(ctx context.Context, clusterRoleBinding *v1.ClusterRoleBinding, opts metav1.CreateOptions) (*v1.ClusterRoleBinding, error)
+	Update(ctx context.Context, clusterRoleBinding *v1.ClusterRoleBinding, opts metav1.UpdateOptions) (*v1.ClusterRoleBinding, error)
+	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ClusterRoleBinding, error)
+	List(ctx context.Context, opts metav1.ListOptions) (*v1.ClusterRoleBindingList, error)
+	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterRoleBinding, err error)
 	ClusterRoleBindingExpansion
 }
 
@@ -61,19 +62,19 @@ func newClusterRoleBindings(c *RbacV1Client) *clusterRoleBindings {
 }
 
 // Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any.
-func (c *clusterRoleBindings) Get(name string, options metav1.GetOptions) (result *v1.ClusterRoleBinding, err error) {
+func (c *clusterRoleBindings) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterRoleBinding, err error) {
 	result = &v1.ClusterRoleBinding{}
 	err = c.client.Get().
 		Resource("clusterrolebindings").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors.
-func (c *clusterRoleBindings) List(opts metav1.ListOptions) (result *v1.ClusterRoleBindingList, err error) {
+func (c *clusterRoleBindings) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterRoleBindingList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -83,13 +84,13 @@ func (c *clusterRoleBindings) List(opts metav1.ListOptions) (result *v1.ClusterR
 		Resource("clusterrolebindings").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested clusterRoleBindings.
-func (c *clusterRoleBindings) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+func (c *clusterRoleBindings) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -99,66 +100,69 @@ func (c *clusterRoleBindings) Watch(opts metav1.ListOptions) (watch.Interface, e
 		Resource("clusterrolebindings").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a clusterRoleBinding and creates it.  Returns the server's representation of the clusterRoleBinding, and an error, if there is any.
-func (c *clusterRoleBindings) Create(clusterRoleBinding *v1.ClusterRoleBinding) (result *v1.ClusterRoleBinding, err error) {
+func (c *clusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1.ClusterRoleBinding, opts metav1.CreateOptions) (result *v1.ClusterRoleBinding, err error) {
 	result = &v1.ClusterRoleBinding{}
 	err = c.client.Post().
 		Resource("clusterrolebindings").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(clusterRoleBinding).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any.
-func (c *clusterRoleBindings) Update(clusterRoleBinding *v1.ClusterRoleBinding) (result *v1.ClusterRoleBinding, err error) {
+func (c *clusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1.ClusterRoleBinding, opts metav1.UpdateOptions) (result *v1.ClusterRoleBinding, err error) {
 	result = &v1.ClusterRoleBinding{}
 	err = c.client.Put().
 		Resource("clusterrolebindings").
 		Name(clusterRoleBinding.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(clusterRoleBinding).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs.
-func (c *clusterRoleBindings) Delete(name string, options *metav1.DeleteOptions) error {
+func (c *clusterRoleBindings) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
 	return c.client.Delete().
 		Resource("clusterrolebindings").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *clusterRoleBindings) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+func (c *clusterRoleBindings) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Resource("clusterrolebindings").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched clusterRoleBinding.
-func (c *clusterRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ClusterRoleBinding, err error) {
+func (c *clusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterRoleBinding, err error) {
 	result = &v1.ClusterRoleBinding{}
 	err = c.client.Patch(pt).
 		Resource("clusterrolebindings").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go
index a17d791f..c31e22b6 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1
 
 import (
+	"context"
 	"time"
 
 	v1 "k8s.io/api/rbac/v1"
@@ -37,14 +38,14 @@ type RolesGetter interface {
 
 // RoleInterface has methods to work with Role resources.
 type RoleInterface interface {
-	Create(*v1.Role) (*v1.Role, error)
-	Update(*v1.Role) (*v1.Role, error)
-	Delete(name string, options *metav1.DeleteOptions) error
-	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
-	Get(name string, options metav1.GetOptions) (*v1.Role, error)
-	List(opts metav1.ListOptions) (*v1.RoleList, error)
-	Watch(opts metav1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Role, err error)
+	Create(ctx context.Context, role *v1.Role, opts metav1.CreateOptions) (*v1.Role, error)
+	Update(ctx context.Context, role *v1.Role, opts metav1.UpdateOptions) (*v1.Role, error)
+	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Role, error)
+	List(ctx context.Context, opts metav1.ListOptions) (*v1.RoleList, error)
+	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Role, err error)
 	RoleExpansion
 }
 
@@ -63,20 +64,20 @@ func newRoles(c *RbacV1Client, namespace string) *roles {
 }
 
 // Get takes name of the role, and returns the corresponding role object, and an error if there is any.
-func (c *roles) Get(name string, options metav1.GetOptions) (result *v1.Role, err error) {
+func (c *roles) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Role, err error) {
 	result = &v1.Role{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("roles").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of Roles that match those selectors.
-func (c *roles) List(opts metav1.ListOptions) (result *v1.RoleList, err error) {
+func (c *roles) List(ctx context.Context, opts metav1.ListOptions) (result *v1.RoleList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -87,13 +88,13 @@ func (c *roles) List(opts metav1.ListOptions) (result *v1.RoleList, err error) {
 		Resource("roles").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested roles.
-func (c *roles) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+func (c *roles) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -104,71 +105,74 @@ func (c *roles) Watch(opts metav1.ListOptions) (watch.Interface, error) {
 		Resource("roles").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a role and creates it.  Returns the server's representation of the role, and an error, if there is any.
-func (c *roles) Create(role *v1.Role) (result *v1.Role, err error) {
+func (c *roles) Create(ctx context.Context, role *v1.Role, opts metav1.CreateOptions) (result *v1.Role, err error) {
 	result = &v1.Role{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("roles").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(role).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any.
-func (c *roles) Update(role *v1.Role) (result *v1.Role, err error) {
+func (c *roles) Update(ctx context.Context, role *v1.Role, opts metav1.UpdateOptions) (result *v1.Role, err error) {
 	result = &v1.Role{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("roles").
 		Name(role.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(role).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the role and deletes it. Returns an error if one occurs.
-func (c *roles) Delete(name string, options *metav1.DeleteOptions) error {
+func (c *roles) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("roles").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *roles) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+func (c *roles) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("roles").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched role.
-func (c *roles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Role, err error) {
+func (c *roles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Role, err error) {
 	result = &v1.Role{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("roles").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go
index c87e4571..160fc16e 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1
 
 import (
+	"context"
 	"time"
 
 	v1 "k8s.io/api/rbac/v1"
@@ -37,14 +38,14 @@ type RoleBindingsGetter interface {
 
 // RoleBindingInterface has methods to work with RoleBinding resources.
 type RoleBindingInterface interface {
-	Create(*v1.RoleBinding) (*v1.RoleBinding, error)
-	Update(*v1.RoleBinding) (*v1.RoleBinding, error)
-	Delete(name string, options *metav1.DeleteOptions) error
-	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
-	Get(name string, options metav1.GetOptions) (*v1.RoleBinding, error)
-	List(opts metav1.ListOptions) (*v1.RoleBindingList, error)
-	Watch(opts metav1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.RoleBinding, err error)
+	Create(ctx context.Context, roleBinding *v1.RoleBinding, opts metav1.CreateOptions) (*v1.RoleBinding, error)
+	Update(ctx context.Context, roleBinding *v1.RoleBinding, opts metav1.UpdateOptions) (*v1.RoleBinding, error)
+	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.RoleBinding, error)
+	List(ctx context.Context, opts metav1.ListOptions) (*v1.RoleBindingList, error)
+	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RoleBinding, err error)
 	RoleBindingExpansion
 }
 
@@ -63,20 +64,20 @@ func newRoleBindings(c *RbacV1Client, namespace string) *roleBindings {
 }
 
 // Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any.
-func (c *roleBindings) Get(name string, options metav1.GetOptions) (result *v1.RoleBinding, err error) {
+func (c *roleBindings) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.RoleBinding, err error) {
 	result = &v1.RoleBinding{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("rolebindings").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of RoleBindings that match those selectors.
-func (c *roleBindings) List(opts metav1.ListOptions) (result *v1.RoleBindingList, err error) {
+func (c *roleBindings) List(ctx context.Context, opts metav1.ListOptions) (result *v1.RoleBindingList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -87,13 +88,13 @@ func (c *roleBindings) List(opts metav1.ListOptions) (result *v1.RoleBindingList
 		Resource("rolebindings").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested roleBindings.
-func (c *roleBindings) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+func (c *roleBindings) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -104,71 +105,74 @@ func (c *roleBindings) Watch(opts metav1.ListOptions) (watch.Interface, error) {
 		Resource("rolebindings").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a roleBinding and creates it.  Returns the server's representation of the roleBinding, and an error, if there is any.
-func (c *roleBindings) Create(roleBinding *v1.RoleBinding) (result *v1.RoleBinding, err error) {
+func (c *roleBindings) Create(ctx context.Context, roleBinding *v1.RoleBinding, opts metav1.CreateOptions) (result *v1.RoleBinding, err error) {
 	result = &v1.RoleBinding{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("rolebindings").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(roleBinding).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any.
-func (c *roleBindings) Update(roleBinding *v1.RoleBinding) (result *v1.RoleBinding, err error) {
+func (c *roleBindings) Update(ctx context.Context, roleBinding *v1.RoleBinding, opts metav1.UpdateOptions) (result *v1.RoleBinding, err error) {
 	result = &v1.RoleBinding{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("rolebindings").
 		Name(roleBinding.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(roleBinding).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the roleBinding and deletes it. Returns an error if one occurs.
-func (c *roleBindings) Delete(name string, options *metav1.DeleteOptions) error {
+func (c *roleBindings) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("rolebindings").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *roleBindings) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+func (c *roleBindings) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("rolebindings").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched roleBinding.
-func (c *roleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.RoleBinding, err error) {
+func (c *roleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RoleBinding, err error) {
 	result = &v1.RoleBinding{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("rolebindings").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go
index 77e66877..678d3711 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1alpha1
 
 import (
+	"context"
 	"time"
 
 	v1alpha1 "k8s.io/api/rbac/v1alpha1"
@@ -37,14 +38,14 @@ type ClusterRolesGetter interface {
 
 // ClusterRoleInterface has methods to work with ClusterRole resources.
 type ClusterRoleInterface interface {
-	Create(*v1alpha1.ClusterRole) (*v1alpha1.ClusterRole, error)
-	Update(*v1alpha1.ClusterRole) (*v1alpha1.ClusterRole, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1alpha1.ClusterRole, error)
-	List(opts v1.ListOptions) (*v1alpha1.ClusterRoleList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRole, err error)
+	Create(ctx context.Context, clusterRole *v1alpha1.ClusterRole, opts v1.CreateOptions) (*v1alpha1.ClusterRole, error)
+	Update(ctx context.Context, clusterRole *v1alpha1.ClusterRole, opts v1.UpdateOptions) (*v1alpha1.ClusterRole, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ClusterRole, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ClusterRoleList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterRole, err error)
 	ClusterRoleExpansion
 }
 
@@ -61,19 +62,19 @@ func newClusterRoles(c *RbacV1alpha1Client) *clusterRoles {
 }
 
 // Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any.
-func (c *clusterRoles) Get(name string, options v1.GetOptions) (result *v1alpha1.ClusterRole, err error) {
+func (c *clusterRoles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterRole, err error) {
 	result = &v1alpha1.ClusterRole{}
 	err = c.client.Get().
 		Resource("clusterroles").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of ClusterRoles that match those selectors.
-func (c *clusterRoles) List(opts v1.ListOptions) (result *v1alpha1.ClusterRoleList, err error) {
+func (c *clusterRoles) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterRoleList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -83,13 +84,13 @@ func (c *clusterRoles) List(opts v1.ListOptions) (result *v1alpha1.ClusterRoleLi
 		Resource("clusterroles").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested clusterRoles.
-func (c *clusterRoles) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *clusterRoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -99,66 +100,69 @@ func (c *clusterRoles) Watch(opts v1.ListOptions) (watch.Interface, error) {
 		Resource("clusterroles").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a clusterRole and creates it.  Returns the server's representation of the clusterRole, and an error, if there is any.
-func (c *clusterRoles) Create(clusterRole *v1alpha1.ClusterRole) (result *v1alpha1.ClusterRole, err error) {
+func (c *clusterRoles) Create(ctx context.Context, clusterRole *v1alpha1.ClusterRole, opts v1.CreateOptions) (result *v1alpha1.ClusterRole, err error) {
 	result = &v1alpha1.ClusterRole{}
 	err = c.client.Post().
 		Resource("clusterroles").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(clusterRole).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any.
-func (c *clusterRoles) Update(clusterRole *v1alpha1.ClusterRole) (result *v1alpha1.ClusterRole, err error) {
+func (c *clusterRoles) Update(ctx context.Context, clusterRole *v1alpha1.ClusterRole, opts v1.UpdateOptions) (result *v1alpha1.ClusterRole, err error) {
 	result = &v1alpha1.ClusterRole{}
 	err = c.client.Put().
 		Resource("clusterroles").
 		Name(clusterRole.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(clusterRole).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the clusterRole and deletes it. Returns an error if one occurs.
-func (c *clusterRoles) Delete(name string, options *v1.DeleteOptions) error {
+func (c *clusterRoles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Resource("clusterroles").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *clusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *clusterRoles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Resource("clusterroles").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched clusterRole.
-func (c *clusterRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRole, err error) {
+func (c *clusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterRole, err error) {
 	result = &v1alpha1.ClusterRole{}
 	err = c.client.Patch(pt).
 		Resource("clusterroles").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go
index 0d1b9d20..7a9ca295 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1alpha1
 
 import (
+	"context"
 	"time"
 
 	v1alpha1 "k8s.io/api/rbac/v1alpha1"
@@ -37,14 +38,14 @@ type ClusterRoleBindingsGetter interface {
 
 // ClusterRoleBindingInterface has methods to work with ClusterRoleBinding resources.
 type ClusterRoleBindingInterface interface {
-	Create(*v1alpha1.ClusterRoleBinding) (*v1alpha1.ClusterRoleBinding, error)
-	Update(*v1alpha1.ClusterRoleBinding) (*v1alpha1.ClusterRoleBinding, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1alpha1.ClusterRoleBinding, error)
-	List(opts v1.ListOptions) (*v1alpha1.ClusterRoleBindingList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error)
+	Create(ctx context.Context, clusterRoleBinding *v1alpha1.ClusterRoleBinding, opts v1.CreateOptions) (*v1alpha1.ClusterRoleBinding, error)
+	Update(ctx context.Context, clusterRoleBinding *v1alpha1.ClusterRoleBinding, opts v1.UpdateOptions) (*v1alpha1.ClusterRoleBinding, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ClusterRoleBinding, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ClusterRoleBindingList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error)
 	ClusterRoleBindingExpansion
 }
 
@@ -61,19 +62,19 @@ func newClusterRoleBindings(c *RbacV1alpha1Client) *clusterRoleBindings {
 }
 
 // Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any.
-func (c *clusterRoleBindings) Get(name string, options v1.GetOptions) (result *v1alpha1.ClusterRoleBinding, err error) {
+func (c *clusterRoleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterRoleBinding, err error) {
 	result = &v1alpha1.ClusterRoleBinding{}
 	err = c.client.Get().
 		Resource("clusterrolebindings").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors.
-func (c *clusterRoleBindings) List(opts v1.ListOptions) (result *v1alpha1.ClusterRoleBindingList, err error) {
+func (c *clusterRoleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterRoleBindingList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -83,13 +84,13 @@ func (c *clusterRoleBindings) List(opts v1.ListOptions) (result *v1alpha1.Cluste
 		Resource("clusterrolebindings").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested clusterRoleBindings.
-func (c *clusterRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *clusterRoleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -99,66 +100,69 @@ func (c *clusterRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error
 		Resource("clusterrolebindings").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a clusterRoleBinding and creates it.  Returns the server's representation of the clusterRoleBinding, and an error, if there is any.
-func (c *clusterRoleBindings) Create(clusterRoleBinding *v1alpha1.ClusterRoleBinding) (result *v1alpha1.ClusterRoleBinding, err error) {
+func (c *clusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1alpha1.ClusterRoleBinding, opts v1.CreateOptions) (result *v1alpha1.ClusterRoleBinding, err error) {
 	result = &v1alpha1.ClusterRoleBinding{}
 	err = c.client.Post().
 		Resource("clusterrolebindings").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(clusterRoleBinding).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any.
-func (c *clusterRoleBindings) Update(clusterRoleBinding *v1alpha1.ClusterRoleBinding) (result *v1alpha1.ClusterRoleBinding, err error) {
+func (c *clusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1alpha1.ClusterRoleBinding, opts v1.UpdateOptions) (result *v1alpha1.ClusterRoleBinding, err error) {
 	result = &v1alpha1.ClusterRoleBinding{}
 	err = c.client.Put().
 		Resource("clusterrolebindings").
 		Name(clusterRoleBinding.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(clusterRoleBinding).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs.
-func (c *clusterRoleBindings) Delete(name string, options *v1.DeleteOptions) error {
+func (c *clusterRoleBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Resource("clusterrolebindings").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *clusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *clusterRoleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Resource("clusterrolebindings").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched clusterRoleBinding.
-func (c *clusterRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error) {
+func (c *clusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error) {
 	result = &v1alpha1.ClusterRoleBinding{}
 	err = c.client.Patch(pt).
 		Resource("clusterrolebindings").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go
index 4a4b6724..56ec6e37 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1alpha1
 
 import (
+	"context"
 	"time"
 
 	v1alpha1 "k8s.io/api/rbac/v1alpha1"
@@ -37,14 +38,14 @@ type RolesGetter interface {
 
 // RoleInterface has methods to work with Role resources.
 type RoleInterface interface {
-	Create(*v1alpha1.Role) (*v1alpha1.Role, error)
-	Update(*v1alpha1.Role) (*v1alpha1.Role, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1alpha1.Role, error)
-	List(opts v1.ListOptions) (*v1alpha1.RoleList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Role, err error)
+	Create(ctx context.Context, role *v1alpha1.Role, opts v1.CreateOptions) (*v1alpha1.Role, error)
+	Update(ctx context.Context, role *v1alpha1.Role, opts v1.UpdateOptions) (*v1alpha1.Role, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.Role, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.RoleList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Role, err error)
 	RoleExpansion
 }
 
@@ -63,20 +64,20 @@ func newRoles(c *RbacV1alpha1Client, namespace string) *roles {
 }
 
 // Get takes name of the role, and returns the corresponding role object, and an error if there is any.
-func (c *roles) Get(name string, options v1.GetOptions) (result *v1alpha1.Role, err error) {
+func (c *roles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Role, err error) {
 	result = &v1alpha1.Role{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("roles").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of Roles that match those selectors.
-func (c *roles) List(opts v1.ListOptions) (result *v1alpha1.RoleList, err error) {
+func (c *roles) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RoleList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -87,13 +88,13 @@ func (c *roles) List(opts v1.ListOptions) (result *v1alpha1.RoleList, err error)
 		Resource("roles").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested roles.
-func (c *roles) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *roles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -104,71 +105,74 @@ func (c *roles) Watch(opts v1.ListOptions) (watch.Interface, error) {
 		Resource("roles").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a role and creates it.  Returns the server's representation of the role, and an error, if there is any.
-func (c *roles) Create(role *v1alpha1.Role) (result *v1alpha1.Role, err error) {
+func (c *roles) Create(ctx context.Context, role *v1alpha1.Role, opts v1.CreateOptions) (result *v1alpha1.Role, err error) {
 	result = &v1alpha1.Role{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("roles").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(role).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any.
-func (c *roles) Update(role *v1alpha1.Role) (result *v1alpha1.Role, err error) {
+func (c *roles) Update(ctx context.Context, role *v1alpha1.Role, opts v1.UpdateOptions) (result *v1alpha1.Role, err error) {
 	result = &v1alpha1.Role{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("roles").
 		Name(role.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(role).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the role and deletes it. Returns an error if one occurs.
-func (c *roles) Delete(name string, options *v1.DeleteOptions) error {
+func (c *roles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("roles").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *roles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *roles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("roles").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched role.
-func (c *roles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Role, err error) {
+func (c *roles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Role, err error) {
 	result = &v1alpha1.Role{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("roles").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go
index bf4e5a10..b4b1df5d 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1alpha1
 
 import (
+	"context"
 	"time"
 
 	v1alpha1 "k8s.io/api/rbac/v1alpha1"
@@ -37,14 +38,14 @@ type RoleBindingsGetter interface {
 
 // RoleBindingInterface has methods to work with RoleBinding resources.
 type RoleBindingInterface interface {
-	Create(*v1alpha1.RoleBinding) (*v1alpha1.RoleBinding, error)
-	Update(*v1alpha1.RoleBinding) (*v1alpha1.RoleBinding, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1alpha1.RoleBinding, error)
-	List(opts v1.ListOptions) (*v1alpha1.RoleBindingList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.RoleBinding, err error)
+	Create(ctx context.Context, roleBinding *v1alpha1.RoleBinding, opts v1.CreateOptions) (*v1alpha1.RoleBinding, error)
+	Update(ctx context.Context, roleBinding *v1alpha1.RoleBinding, opts v1.UpdateOptions) (*v1alpha1.RoleBinding, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.RoleBinding, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.RoleBindingList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RoleBinding, err error)
 	RoleBindingExpansion
 }
 
@@ -63,20 +64,20 @@ func newRoleBindings(c *RbacV1alpha1Client, namespace string) *roleBindings {
 }
 
 // Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any.
-func (c *roleBindings) Get(name string, options v1.GetOptions) (result *v1alpha1.RoleBinding, err error) {
+func (c *roleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.RoleBinding, err error) {
 	result = &v1alpha1.RoleBinding{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("rolebindings").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of RoleBindings that match those selectors.
-func (c *roleBindings) List(opts v1.ListOptions) (result *v1alpha1.RoleBindingList, err error) {
+func (c *roleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.RoleBindingList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -87,13 +88,13 @@ func (c *roleBindings) List(opts v1.ListOptions) (result *v1alpha1.RoleBindingLi
 		Resource("rolebindings").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested roleBindings.
-func (c *roleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *roleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -104,71 +105,74 @@ func (c *roleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) {
 		Resource("rolebindings").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a roleBinding and creates it.  Returns the server's representation of the roleBinding, and an error, if there is any.
-func (c *roleBindings) Create(roleBinding *v1alpha1.RoleBinding) (result *v1alpha1.RoleBinding, err error) {
+func (c *roleBindings) Create(ctx context.Context, roleBinding *v1alpha1.RoleBinding, opts v1.CreateOptions) (result *v1alpha1.RoleBinding, err error) {
 	result = &v1alpha1.RoleBinding{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("rolebindings").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(roleBinding).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any.
-func (c *roleBindings) Update(roleBinding *v1alpha1.RoleBinding) (result *v1alpha1.RoleBinding, err error) {
+func (c *roleBindings) Update(ctx context.Context, roleBinding *v1alpha1.RoleBinding, opts v1.UpdateOptions) (result *v1alpha1.RoleBinding, err error) {
 	result = &v1alpha1.RoleBinding{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("rolebindings").
 		Name(roleBinding.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(roleBinding).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the roleBinding and deletes it. Returns an error if one occurs.
-func (c *roleBindings) Delete(name string, options *v1.DeleteOptions) error {
+func (c *roleBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("rolebindings").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *roleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *roleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("rolebindings").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched roleBinding.
-func (c *roleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.RoleBinding, err error) {
+func (c *roleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.RoleBinding, err error) {
 	result = &v1alpha1.RoleBinding{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("rolebindings").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go
index 21d3cab3..4db46666 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1beta1
 
 import (
+	"context"
 	"time"
 
 	v1beta1 "k8s.io/api/rbac/v1beta1"
@@ -37,14 +38,14 @@ type ClusterRolesGetter interface {
 
 // ClusterRoleInterface has methods to work with ClusterRole resources.
 type ClusterRoleInterface interface {
-	Create(*v1beta1.ClusterRole) (*v1beta1.ClusterRole, error)
-	Update(*v1beta1.ClusterRole) (*v1beta1.ClusterRole, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1beta1.ClusterRole, error)
-	List(opts v1.ListOptions) (*v1beta1.ClusterRoleList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRole, err error)
+	Create(ctx context.Context, clusterRole *v1beta1.ClusterRole, opts v1.CreateOptions) (*v1beta1.ClusterRole, error)
+	Update(ctx context.Context, clusterRole *v1beta1.ClusterRole, opts v1.UpdateOptions) (*v1beta1.ClusterRole, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ClusterRole, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ClusterRoleList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ClusterRole, err error)
 	ClusterRoleExpansion
 }
 
@@ -61,19 +62,19 @@ func newClusterRoles(c *RbacV1beta1Client) *clusterRoles {
 }
 
 // Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any.
-func (c *clusterRoles) Get(name string, options v1.GetOptions) (result *v1beta1.ClusterRole, err error) {
+func (c *clusterRoles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ClusterRole, err error) {
 	result = &v1beta1.ClusterRole{}
 	err = c.client.Get().
 		Resource("clusterroles").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of ClusterRoles that match those selectors.
-func (c *clusterRoles) List(opts v1.ListOptions) (result *v1beta1.ClusterRoleList, err error) {
+func (c *clusterRoles) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ClusterRoleList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -83,13 +84,13 @@ func (c *clusterRoles) List(opts v1.ListOptions) (result *v1beta1.ClusterRoleLis
 		Resource("clusterroles").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested clusterRoles.
-func (c *clusterRoles) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *clusterRoles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -99,66 +100,69 @@ func (c *clusterRoles) Watch(opts v1.ListOptions) (watch.Interface, error) {
 		Resource("clusterroles").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a clusterRole and creates it.  Returns the server's representation of the clusterRole, and an error, if there is any.
-func (c *clusterRoles) Create(clusterRole *v1beta1.ClusterRole) (result *v1beta1.ClusterRole, err error) {
+func (c *clusterRoles) Create(ctx context.Context, clusterRole *v1beta1.ClusterRole, opts v1.CreateOptions) (result *v1beta1.ClusterRole, err error) {
 	result = &v1beta1.ClusterRole{}
 	err = c.client.Post().
 		Resource("clusterroles").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(clusterRole).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any.
-func (c *clusterRoles) Update(clusterRole *v1beta1.ClusterRole) (result *v1beta1.ClusterRole, err error) {
+func (c *clusterRoles) Update(ctx context.Context, clusterRole *v1beta1.ClusterRole, opts v1.UpdateOptions) (result *v1beta1.ClusterRole, err error) {
 	result = &v1beta1.ClusterRole{}
 	err = c.client.Put().
 		Resource("clusterroles").
 		Name(clusterRole.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(clusterRole).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the clusterRole and deletes it. Returns an error if one occurs.
-func (c *clusterRoles) Delete(name string, options *v1.DeleteOptions) error {
+func (c *clusterRoles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Resource("clusterroles").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *clusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *clusterRoles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Resource("clusterroles").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched clusterRole.
-func (c *clusterRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRole, err error) {
+func (c *clusterRoles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ClusterRole, err error) {
 	result = &v1beta1.ClusterRole{}
 	err = c.client.Patch(pt).
 		Resource("clusterroles").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go
index 47eb9e4e..f45777c2 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1beta1
 
 import (
+	"context"
 	"time"
 
 	v1beta1 "k8s.io/api/rbac/v1beta1"
@@ -37,14 +38,14 @@ type ClusterRoleBindingsGetter interface {
 
 // ClusterRoleBindingInterface has methods to work with ClusterRoleBinding resources.
 type ClusterRoleBindingInterface interface {
-	Create(*v1beta1.ClusterRoleBinding) (*v1beta1.ClusterRoleBinding, error)
-	Update(*v1beta1.ClusterRoleBinding) (*v1beta1.ClusterRoleBinding, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1beta1.ClusterRoleBinding, error)
-	List(opts v1.ListOptions) (*v1beta1.ClusterRoleBindingList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRoleBinding, err error)
+	Create(ctx context.Context, clusterRoleBinding *v1beta1.ClusterRoleBinding, opts v1.CreateOptions) (*v1beta1.ClusterRoleBinding, error)
+	Update(ctx context.Context, clusterRoleBinding *v1beta1.ClusterRoleBinding, opts v1.UpdateOptions) (*v1beta1.ClusterRoleBinding, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.ClusterRoleBinding, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1beta1.ClusterRoleBindingList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ClusterRoleBinding, err error)
 	ClusterRoleBindingExpansion
 }
 
@@ -61,19 +62,19 @@ func newClusterRoleBindings(c *RbacV1beta1Client) *clusterRoleBindings {
 }
 
 // Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any.
-func (c *clusterRoleBindings) Get(name string, options v1.GetOptions) (result *v1beta1.ClusterRoleBinding, err error) {
+func (c *clusterRoleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.ClusterRoleBinding, err error) {
 	result = &v1beta1.ClusterRoleBinding{}
 	err = c.client.Get().
 		Resource("clusterrolebindings").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors.
-func (c *clusterRoleBindings) List(opts v1.ListOptions) (result *v1beta1.ClusterRoleBindingList, err error) {
+func (c *clusterRoleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.ClusterRoleBindingList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -83,13 +84,13 @@ func (c *clusterRoleBindings) List(opts v1.ListOptions) (result *v1beta1.Cluster
 		Resource("clusterrolebindings").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested clusterRoleBindings.
-func (c *clusterRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *clusterRoleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -99,66 +100,69 @@ func (c *clusterRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error
 		Resource("clusterrolebindings").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a clusterRoleBinding and creates it.  Returns the server's representation of the clusterRoleBinding, and an error, if there is any.
-func (c *clusterRoleBindings) Create(clusterRoleBinding *v1beta1.ClusterRoleBinding) (result *v1beta1.ClusterRoleBinding, err error) {
+func (c *clusterRoleBindings) Create(ctx context.Context, clusterRoleBinding *v1beta1.ClusterRoleBinding, opts v1.CreateOptions) (result *v1beta1.ClusterRoleBinding, err error) {
 	result = &v1beta1.ClusterRoleBinding{}
 	err = c.client.Post().
 		Resource("clusterrolebindings").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(clusterRoleBinding).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any.
-func (c *clusterRoleBindings) Update(clusterRoleBinding *v1beta1.ClusterRoleBinding) (result *v1beta1.ClusterRoleBinding, err error) {
+func (c *clusterRoleBindings) Update(ctx context.Context, clusterRoleBinding *v1beta1.ClusterRoleBinding, opts v1.UpdateOptions) (result *v1beta1.ClusterRoleBinding, err error) {
 	result = &v1beta1.ClusterRoleBinding{}
 	err = c.client.Put().
 		Resource("clusterrolebindings").
 		Name(clusterRoleBinding.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(clusterRoleBinding).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs.
-func (c *clusterRoleBindings) Delete(name string, options *v1.DeleteOptions) error {
+func (c *clusterRoleBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Resource("clusterrolebindings").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *clusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *clusterRoleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Resource("clusterrolebindings").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched clusterRoleBinding.
-func (c *clusterRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRoleBinding, err error) {
+func (c *clusterRoleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.ClusterRoleBinding, err error) {
 	result = &v1beta1.ClusterRoleBinding{}
 	err = c.client.Patch(pt).
 		Resource("clusterrolebindings").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go
index 2b61aad5..c172e7f6 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1beta1
 
 import (
+	"context"
 	"time"
 
 	v1beta1 "k8s.io/api/rbac/v1beta1"
@@ -37,14 +38,14 @@ type RolesGetter interface {
 
 // RoleInterface has methods to work with Role resources.
 type RoleInterface interface {
-	Create(*v1beta1.Role) (*v1beta1.Role, error)
-	Update(*v1beta1.Role) (*v1beta1.Role, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1beta1.Role, error)
-	List(opts v1.ListOptions) (*v1beta1.RoleList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Role, err error)
+	Create(ctx context.Context, role *v1beta1.Role, opts v1.CreateOptions) (*v1beta1.Role, error)
+	Update(ctx context.Context, role *v1beta1.Role, opts v1.UpdateOptions) (*v1beta1.Role, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.Role, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1beta1.RoleList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Role, err error)
 	RoleExpansion
 }
 
@@ -63,20 +64,20 @@ func newRoles(c *RbacV1beta1Client, namespace string) *roles {
 }
 
 // Get takes name of the role, and returns the corresponding role object, and an error if there is any.
-func (c *roles) Get(name string, options v1.GetOptions) (result *v1beta1.Role, err error) {
+func (c *roles) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.Role, err error) {
 	result = &v1beta1.Role{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("roles").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of Roles that match those selectors.
-func (c *roles) List(opts v1.ListOptions) (result *v1beta1.RoleList, err error) {
+func (c *roles) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.RoleList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -87,13 +88,13 @@ func (c *roles) List(opts v1.ListOptions) (result *v1beta1.RoleList, err error)
 		Resource("roles").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested roles.
-func (c *roles) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *roles) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -104,71 +105,74 @@ func (c *roles) Watch(opts v1.ListOptions) (watch.Interface, error) {
 		Resource("roles").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a role and creates it.  Returns the server's representation of the role, and an error, if there is any.
-func (c *roles) Create(role *v1beta1.Role) (result *v1beta1.Role, err error) {
+func (c *roles) Create(ctx context.Context, role *v1beta1.Role, opts v1.CreateOptions) (result *v1beta1.Role, err error) {
 	result = &v1beta1.Role{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("roles").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(role).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any.
-func (c *roles) Update(role *v1beta1.Role) (result *v1beta1.Role, err error) {
+func (c *roles) Update(ctx context.Context, role *v1beta1.Role, opts v1.UpdateOptions) (result *v1beta1.Role, err error) {
 	result = &v1beta1.Role{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("roles").
 		Name(role.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(role).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the role and deletes it. Returns an error if one occurs.
-func (c *roles) Delete(name string, options *v1.DeleteOptions) error {
+func (c *roles) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("roles").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *roles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *roles) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("roles").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched role.
-func (c *roles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Role, err error) {
+func (c *roles) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.Role, err error) {
 	result = &v1beta1.Role{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("roles").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go
index 0bd118fd..f37bfb74 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1beta1
 
 import (
+	"context"
 	"time"
 
 	v1beta1 "k8s.io/api/rbac/v1beta1"
@@ -37,14 +38,14 @@ type RoleBindingsGetter interface {
 
 // RoleBindingInterface has methods to work with RoleBinding resources.
 type RoleBindingInterface interface {
-	Create(*v1beta1.RoleBinding) (*v1beta1.RoleBinding, error)
-	Update(*v1beta1.RoleBinding) (*v1beta1.RoleBinding, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1beta1.RoleBinding, error)
-	List(opts v1.ListOptions) (*v1beta1.RoleBindingList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RoleBinding, err error)
+	Create(ctx context.Context, roleBinding *v1beta1.RoleBinding, opts v1.CreateOptions) (*v1beta1.RoleBinding, error)
+	Update(ctx context.Context, roleBinding *v1beta1.RoleBinding, opts v1.UpdateOptions) (*v1beta1.RoleBinding, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.RoleBinding, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1beta1.RoleBindingList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.RoleBinding, err error)
 	RoleBindingExpansion
 }
 
@@ -63,20 +64,20 @@ func newRoleBindings(c *RbacV1beta1Client, namespace string) *roleBindings {
 }
 
 // Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any.
-func (c *roleBindings) Get(name string, options v1.GetOptions) (result *v1beta1.RoleBinding, err error) {
+func (c *roleBindings) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.RoleBinding, err error) {
 	result = &v1beta1.RoleBinding{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("rolebindings").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of RoleBindings that match those selectors.
-func (c *roleBindings) List(opts v1.ListOptions) (result *v1beta1.RoleBindingList, err error) {
+func (c *roleBindings) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.RoleBindingList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -87,13 +88,13 @@ func (c *roleBindings) List(opts v1.ListOptions) (result *v1beta1.RoleBindingLis
 		Resource("rolebindings").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested roleBindings.
-func (c *roleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *roleBindings) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -104,71 +105,74 @@ func (c *roleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) {
 		Resource("rolebindings").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a roleBinding and creates it.  Returns the server's representation of the roleBinding, and an error, if there is any.
-func (c *roleBindings) Create(roleBinding *v1beta1.RoleBinding) (result *v1beta1.RoleBinding, err error) {
+func (c *roleBindings) Create(ctx context.Context, roleBinding *v1beta1.RoleBinding, opts v1.CreateOptions) (result *v1beta1.RoleBinding, err error) {
 	result = &v1beta1.RoleBinding{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("rolebindings").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(roleBinding).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any.
-func (c *roleBindings) Update(roleBinding *v1beta1.RoleBinding) (result *v1beta1.RoleBinding, err error) {
+func (c *roleBindings) Update(ctx context.Context, roleBinding *v1beta1.RoleBinding, opts v1.UpdateOptions) (result *v1beta1.RoleBinding, err error) {
 	result = &v1beta1.RoleBinding{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("rolebindings").
 		Name(roleBinding.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(roleBinding).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the roleBinding and deletes it. Returns an error if one occurs.
-func (c *roleBindings) Delete(name string, options *v1.DeleteOptions) error {
+func (c *roleBindings) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("rolebindings").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *roleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *roleBindings) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("rolebindings").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched roleBinding.
-func (c *roleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RoleBinding, err error) {
+func (c *roleBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.RoleBinding, err error) {
 	result = &v1beta1.RoleBinding{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("rolebindings").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go
index 3abbb7b8..06185d5f 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1
 
 import (
+	"context"
 	"time"
 
 	v1 "k8s.io/api/scheduling/v1"
@@ -37,14 +38,14 @@ type PriorityClassesGetter interface {
 
 // PriorityClassInterface has methods to work with PriorityClass resources.
 type PriorityClassInterface interface {
-	Create(*v1.PriorityClass) (*v1.PriorityClass, error)
-	Update(*v1.PriorityClass) (*v1.PriorityClass, error)
-	Delete(name string, options *metav1.DeleteOptions) error
-	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
-	Get(name string, options metav1.GetOptions) (*v1.PriorityClass, error)
-	List(opts metav1.ListOptions) (*v1.PriorityClassList, error)
-	Watch(opts metav1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PriorityClass, err error)
+	Create(ctx context.Context, priorityClass *v1.PriorityClass, opts metav1.CreateOptions) (*v1.PriorityClass, error)
+	Update(ctx context.Context, priorityClass *v1.PriorityClass, opts metav1.UpdateOptions) (*v1.PriorityClass, error)
+	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.PriorityClass, error)
+	List(ctx context.Context, opts metav1.ListOptions) (*v1.PriorityClassList, error)
+	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PriorityClass, err error)
 	PriorityClassExpansion
 }
 
@@ -61,19 +62,19 @@ func newPriorityClasses(c *SchedulingV1Client) *priorityClasses {
 }
 
 // Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any.
-func (c *priorityClasses) Get(name string, options metav1.GetOptions) (result *v1.PriorityClass, err error) {
+func (c *priorityClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PriorityClass, err error) {
 	result = &v1.PriorityClass{}
 	err = c.client.Get().
 		Resource("priorityclasses").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of PriorityClasses that match those selectors.
-func (c *priorityClasses) List(opts metav1.ListOptions) (result *v1.PriorityClassList, err error) {
+func (c *priorityClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PriorityClassList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -83,13 +84,13 @@ func (c *priorityClasses) List(opts metav1.ListOptions) (result *v1.PriorityClas
 		Resource("priorityclasses").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested priorityClasses.
-func (c *priorityClasses) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+func (c *priorityClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -99,66 +100,69 @@ func (c *priorityClasses) Watch(opts metav1.ListOptions) (watch.Interface, error
 		Resource("priorityclasses").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a priorityClass and creates it.  Returns the server's representation of the priorityClass, and an error, if there is any.
-func (c *priorityClasses) Create(priorityClass *v1.PriorityClass) (result *v1.PriorityClass, err error) {
+func (c *priorityClasses) Create(ctx context.Context, priorityClass *v1.PriorityClass, opts metav1.CreateOptions) (result *v1.PriorityClass, err error) {
 	result = &v1.PriorityClass{}
 	err = c.client.Post().
 		Resource("priorityclasses").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(priorityClass).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any.
-func (c *priorityClasses) Update(priorityClass *v1.PriorityClass) (result *v1.PriorityClass, err error) {
+func (c *priorityClasses) Update(ctx context.Context, priorityClass *v1.PriorityClass, opts metav1.UpdateOptions) (result *v1.PriorityClass, err error) {
 	result = &v1.PriorityClass{}
 	err = c.client.Put().
 		Resource("priorityclasses").
 		Name(priorityClass.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(priorityClass).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the priorityClass and deletes it. Returns an error if one occurs.
-func (c *priorityClasses) Delete(name string, options *metav1.DeleteOptions) error {
+func (c *priorityClasses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
 	return c.client.Delete().
 		Resource("priorityclasses").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *priorityClasses) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+func (c *priorityClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Resource("priorityclasses").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched priorityClass.
-func (c *priorityClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PriorityClass, err error) {
+func (c *priorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PriorityClass, err error) {
 	result = &v1.PriorityClass{}
 	err = c.client.Patch(pt).
 		Resource("priorityclasses").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go
index 29d646fb..ae9875e9 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1alpha1
 
 import (
+	"context"
 	"time"
 
 	v1alpha1 "k8s.io/api/scheduling/v1alpha1"
@@ -37,14 +38,14 @@ type PriorityClassesGetter interface {
 
 // PriorityClassInterface has methods to work with PriorityClass resources.
 type PriorityClassInterface interface {
-	Create(*v1alpha1.PriorityClass) (*v1alpha1.PriorityClass, error)
-	Update(*v1alpha1.PriorityClass) (*v1alpha1.PriorityClass, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1alpha1.PriorityClass, error)
-	List(opts v1.ListOptions) (*v1alpha1.PriorityClassList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PriorityClass, err error)
+	Create(ctx context.Context, priorityClass *v1alpha1.PriorityClass, opts v1.CreateOptions) (*v1alpha1.PriorityClass, error)
+	Update(ctx context.Context, priorityClass *v1alpha1.PriorityClass, opts v1.UpdateOptions) (*v1alpha1.PriorityClass, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.PriorityClass, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.PriorityClassList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PriorityClass, err error)
 	PriorityClassExpansion
 }
 
@@ -61,19 +62,19 @@ func newPriorityClasses(c *SchedulingV1alpha1Client) *priorityClasses {
 }
 
 // Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any.
-func (c *priorityClasses) Get(name string, options v1.GetOptions) (result *v1alpha1.PriorityClass, err error) {
+func (c *priorityClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PriorityClass, err error) {
 	result = &v1alpha1.PriorityClass{}
 	err = c.client.Get().
 		Resource("priorityclasses").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of PriorityClasses that match those selectors.
-func (c *priorityClasses) List(opts v1.ListOptions) (result *v1alpha1.PriorityClassList, err error) {
+func (c *priorityClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PriorityClassList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -83,13 +84,13 @@ func (c *priorityClasses) List(opts v1.ListOptions) (result *v1alpha1.PriorityCl
 		Resource("priorityclasses").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested priorityClasses.
-func (c *priorityClasses) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *priorityClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -99,66 +100,69 @@ func (c *priorityClasses) Watch(opts v1.ListOptions) (watch.Interface, error) {
 		Resource("priorityclasses").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a priorityClass and creates it.  Returns the server's representation of the priorityClass, and an error, if there is any.
-func (c *priorityClasses) Create(priorityClass *v1alpha1.PriorityClass) (result *v1alpha1.PriorityClass, err error) {
+func (c *priorityClasses) Create(ctx context.Context, priorityClass *v1alpha1.PriorityClass, opts v1.CreateOptions) (result *v1alpha1.PriorityClass, err error) {
 	result = &v1alpha1.PriorityClass{}
 	err = c.client.Post().
 		Resource("priorityclasses").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(priorityClass).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any.
-func (c *priorityClasses) Update(priorityClass *v1alpha1.PriorityClass) (result *v1alpha1.PriorityClass, err error) {
+func (c *priorityClasses) Update(ctx context.Context, priorityClass *v1alpha1.PriorityClass, opts v1.UpdateOptions) (result *v1alpha1.PriorityClass, err error) {
 	result = &v1alpha1.PriorityClass{}
 	err = c.client.Put().
 		Resource("priorityclasses").
 		Name(priorityClass.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(priorityClass).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the priorityClass and deletes it. Returns an error if one occurs.
-func (c *priorityClasses) Delete(name string, options *v1.DeleteOptions) error {
+func (c *priorityClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Resource("priorityclasses").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *priorityClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *priorityClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Resource("priorityclasses").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched priorityClass.
-func (c *priorityClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PriorityClass, err error) {
+func (c *priorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PriorityClass, err error) {
 	result = &v1alpha1.PriorityClass{}
 	err = c.client.Patch(pt).
 		Resource("priorityclasses").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go
index 5e402f8e..70ed597b 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1beta1
 
 import (
+	"context"
 	"time"
 
 	v1beta1 "k8s.io/api/scheduling/v1beta1"
@@ -37,14 +38,14 @@ type PriorityClassesGetter interface {
 
 // PriorityClassInterface has methods to work with PriorityClass resources.
 type PriorityClassInterface interface {
-	Create(*v1beta1.PriorityClass) (*v1beta1.PriorityClass, error)
-	Update(*v1beta1.PriorityClass) (*v1beta1.PriorityClass, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1beta1.PriorityClass, error)
-	List(opts v1.ListOptions) (*v1beta1.PriorityClassList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PriorityClass, err error)
+	Create(ctx context.Context, priorityClass *v1beta1.PriorityClass, opts v1.CreateOptions) (*v1beta1.PriorityClass, error)
+	Update(ctx context.Context, priorityClass *v1beta1.PriorityClass, opts v1.UpdateOptions) (*v1beta1.PriorityClass, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.PriorityClass, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1beta1.PriorityClassList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PriorityClass, err error)
 	PriorityClassExpansion
 }
 
@@ -61,19 +62,19 @@ func newPriorityClasses(c *SchedulingV1beta1Client) *priorityClasses {
 }
 
 // Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any.
-func (c *priorityClasses) Get(name string, options v1.GetOptions) (result *v1beta1.PriorityClass, err error) {
+func (c *priorityClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PriorityClass, err error) {
 	result = &v1beta1.PriorityClass{}
 	err = c.client.Get().
 		Resource("priorityclasses").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of PriorityClasses that match those selectors.
-func (c *priorityClasses) List(opts v1.ListOptions) (result *v1beta1.PriorityClassList, err error) {
+func (c *priorityClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PriorityClassList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -83,13 +84,13 @@ func (c *priorityClasses) List(opts v1.ListOptions) (result *v1beta1.PriorityCla
 		Resource("priorityclasses").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested priorityClasses.
-func (c *priorityClasses) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *priorityClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -99,66 +100,69 @@ func (c *priorityClasses) Watch(opts v1.ListOptions) (watch.Interface, error) {
 		Resource("priorityclasses").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a priorityClass and creates it.  Returns the server's representation of the priorityClass, and an error, if there is any.
-func (c *priorityClasses) Create(priorityClass *v1beta1.PriorityClass) (result *v1beta1.PriorityClass, err error) {
+func (c *priorityClasses) Create(ctx context.Context, priorityClass *v1beta1.PriorityClass, opts v1.CreateOptions) (result *v1beta1.PriorityClass, err error) {
 	result = &v1beta1.PriorityClass{}
 	err = c.client.Post().
 		Resource("priorityclasses").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(priorityClass).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any.
-func (c *priorityClasses) Update(priorityClass *v1beta1.PriorityClass) (result *v1beta1.PriorityClass, err error) {
+func (c *priorityClasses) Update(ctx context.Context, priorityClass *v1beta1.PriorityClass, opts v1.UpdateOptions) (result *v1beta1.PriorityClass, err error) {
 	result = &v1beta1.PriorityClass{}
 	err = c.client.Put().
 		Resource("priorityclasses").
 		Name(priorityClass.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(priorityClass).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the priorityClass and deletes it. Returns an error if one occurs.
-func (c *priorityClasses) Delete(name string, options *v1.DeleteOptions) error {
+func (c *priorityClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Resource("priorityclasses").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *priorityClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *priorityClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Resource("priorityclasses").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched priorityClass.
-func (c *priorityClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PriorityClass, err error) {
+func (c *priorityClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.PriorityClass, err error) {
 	result = &v1beta1.PriorityClass{}
 	err = c.client.Patch(pt).
 		Resource("priorityclasses").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go
index 8fd6adc5..aa1cb364 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1alpha1
 
 import (
+	"context"
 	"time"
 
 	v1alpha1 "k8s.io/api/settings/v1alpha1"
@@ -37,14 +38,14 @@ type PodPresetsGetter interface {
 
 // PodPresetInterface has methods to work with PodPreset resources.
 type PodPresetInterface interface {
-	Create(*v1alpha1.PodPreset) (*v1alpha1.PodPreset, error)
-	Update(*v1alpha1.PodPreset) (*v1alpha1.PodPreset, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1alpha1.PodPreset, error)
-	List(opts v1.ListOptions) (*v1alpha1.PodPresetList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PodPreset, err error)
+	Create(ctx context.Context, podPreset *v1alpha1.PodPreset, opts v1.CreateOptions) (*v1alpha1.PodPreset, error)
+	Update(ctx context.Context, podPreset *v1alpha1.PodPreset, opts v1.UpdateOptions) (*v1alpha1.PodPreset, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.PodPreset, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.PodPresetList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PodPreset, err error)
 	PodPresetExpansion
 }
 
@@ -63,20 +64,20 @@ func newPodPresets(c *SettingsV1alpha1Client, namespace string) *podPresets {
 }
 
 // Get takes name of the podPreset, and returns the corresponding podPreset object, and an error if there is any.
-func (c *podPresets) Get(name string, options v1.GetOptions) (result *v1alpha1.PodPreset, err error) {
+func (c *podPresets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PodPreset, err error) {
 	result = &v1alpha1.PodPreset{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("podpresets").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of PodPresets that match those selectors.
-func (c *podPresets) List(opts v1.ListOptions) (result *v1alpha1.PodPresetList, err error) {
+func (c *podPresets) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PodPresetList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -87,13 +88,13 @@ func (c *podPresets) List(opts v1.ListOptions) (result *v1alpha1.PodPresetList,
 		Resource("podpresets").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested podPresets.
-func (c *podPresets) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *podPresets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -104,71 +105,74 @@ func (c *podPresets) Watch(opts v1.ListOptions) (watch.Interface, error) {
 		Resource("podpresets").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a podPreset and creates it.  Returns the server's representation of the podPreset, and an error, if there is any.
-func (c *podPresets) Create(podPreset *v1alpha1.PodPreset) (result *v1alpha1.PodPreset, err error) {
+func (c *podPresets) Create(ctx context.Context, podPreset *v1alpha1.PodPreset, opts v1.CreateOptions) (result *v1alpha1.PodPreset, err error) {
 	result = &v1alpha1.PodPreset{}
 	err = c.client.Post().
 		Namespace(c.ns).
 		Resource("podpresets").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(podPreset).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a podPreset and updates it. Returns the server's representation of the podPreset, and an error, if there is any.
-func (c *podPresets) Update(podPreset *v1alpha1.PodPreset) (result *v1alpha1.PodPreset, err error) {
+func (c *podPresets) Update(ctx context.Context, podPreset *v1alpha1.PodPreset, opts v1.UpdateOptions) (result *v1alpha1.PodPreset, err error) {
 	result = &v1alpha1.PodPreset{}
 	err = c.client.Put().
 		Namespace(c.ns).
 		Resource("podpresets").
 		Name(podPreset.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(podPreset).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the podPreset and deletes it. Returns an error if one occurs.
-func (c *podPresets) Delete(name string, options *v1.DeleteOptions) error {
+func (c *podPresets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("podpresets").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *podPresets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *podPresets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("podpresets").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched podPreset.
-func (c *podPresets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PodPreset, err error) {
+func (c *podPresets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PodPreset, err error) {
 	result = &v1alpha1.PodPreset{}
 	err = c.client.Patch(pt).
 		Namespace(c.ns).
 		Resource("podpresets").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
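
DeleteCollection changes shape as well: DeleteOptions is now passed by value (not as a pointer) and is followed by a ListOptions selector. A hedged sketch for the namespaced PodPreset client above; prunePodPresets, the namespace argument, and the label selector are illustrative only.

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// prunePodPresets deletes all PodPresets in a namespace that match a label
// selector, using the value-typed DeleteOptions introduced by this update.
func prunePodPresets(ctx context.Context, cs kubernetes.Interface, ns string) error {
	return cs.SettingsV1alpha1().PodPresets(ns).DeleteCollection(
		ctx,
		metav1.DeleteOptions{},
		metav1.ListOptions{LabelSelector: "app=demo"},
	)
}
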
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csidriver.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csidriver.go
new file mode 100644
index 00000000..92e82251
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csidriver.go
@@ -0,0 +1,168 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	"context"
+	"time"
+
+	v1 "k8s.io/api/storage/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// CSIDriversGetter has a method to return a CSIDriverInterface.
+// A group's client should implement this interface.
+type CSIDriversGetter interface {
+	CSIDrivers() CSIDriverInterface
+}
+
+// CSIDriverInterface has methods to work with CSIDriver resources.
+type CSIDriverInterface interface {
+	Create(ctx context.Context, cSIDriver *v1.CSIDriver, opts metav1.CreateOptions) (*v1.CSIDriver, error)
+	Update(ctx context.Context, cSIDriver *v1.CSIDriver, opts metav1.UpdateOptions) (*v1.CSIDriver, error)
+	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CSIDriver, error)
+	List(ctx context.Context, opts metav1.ListOptions) (*v1.CSIDriverList, error)
+	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSIDriver, err error)
+	CSIDriverExpansion
+}
+
+// cSIDrivers implements CSIDriverInterface
+type cSIDrivers struct {
+	client rest.Interface
+}
+
+// newCSIDrivers returns a CSIDrivers
+func newCSIDrivers(c *StorageV1Client) *cSIDrivers {
+	return &cSIDrivers{
+		client: c.RESTClient(),
+	}
+}
+
+// Get takes name of the cSIDriver, and returns the corresponding cSIDriver object, and an error if there is any.
+func (c *cSIDrivers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CSIDriver, err error) {
+	result = &v1.CSIDriver{}
+	err = c.client.Get().
+		Resource("csidrivers").
+		Name(name).
+		VersionedParams(&options, scheme.ParameterCodec).
+		Do(ctx).
+		Into(result)
+	return
+}
+
+// List takes label and field selectors, and returns the list of CSIDrivers that match those selectors.
+func (c *cSIDrivers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CSIDriverList, err error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	result = &v1.CSIDriverList{}
+	err = c.client.Get().
+		Resource("csidrivers").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Do(ctx).
+		Into(result)
+	return
+}
+
+// Watch returns a watch.Interface that watches the requested cSIDrivers.
+func (c *cSIDrivers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+	var timeout time.Duration
+	if opts.TimeoutSeconds != nil {
+		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+	}
+	opts.Watch = true
+	return c.client.Get().
+		Resource("csidrivers").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Watch(ctx)
+}
+
+// Create takes the representation of a cSIDriver and creates it.  Returns the server's representation of the cSIDriver, and an error, if there is any.
+func (c *cSIDrivers) Create(ctx context.Context, cSIDriver *v1.CSIDriver, opts metav1.CreateOptions) (result *v1.CSIDriver, err error) {
+	result = &v1.CSIDriver{}
+	err = c.client.Post().
+		Resource("csidrivers").
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Body(cSIDriver).
+		Do(ctx).
+		Into(result)
+	return
+}
+
+// Update takes the representation of a cSIDriver and updates it. Returns the server's representation of the cSIDriver, and an error, if there is any.
+func (c *cSIDrivers) Update(ctx context.Context, cSIDriver *v1.CSIDriver, opts metav1.UpdateOptions) (result *v1.CSIDriver, err error) {
+	result = &v1.CSIDriver{}
+	err = c.client.Put().
+		Resource("csidrivers").
+		Name(cSIDriver.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Body(cSIDriver).
+		Do(ctx).
+		Into(result)
+	return
+}
+
+// Delete takes name of the cSIDriver and deletes it. Returns an error if one occurs.
+func (c *cSIDrivers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+	return c.client.Delete().
+		Resource("csidrivers").
+		Name(name).
+		Body(&opts).
+		Do(ctx).
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *cSIDrivers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+	var timeout time.Duration
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Resource("csidrivers").
+		VersionedParams(&listOpts, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(&opts).
+		Do(ctx).
+		Error()
+}
+
+// Patch applies the patch and returns the patched cSIDriver.
+func (c *cSIDrivers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSIDriver, err error) {
+	result = &v1.CSIDriver{}
+	err = c.client.Patch(pt).
+		Resource("csidrivers").
+		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Body(data).
+		Do(ctx).
+		Into(result)
+	return
+}
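
This file introduces a CSIDriver client to the storage/v1 group (the StorageV1Client getter is wired up in storage_client.go later in this patch). A minimal sketch of listing drivers through it, assuming a typed clientset cs is already constructed; listCSIDrivers is an illustrative name, not part of the generated code.

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// listCSIDrivers exercises the new storage/v1 CSIDriver client with the
// context-aware List signature.
func listCSIDrivers(ctx context.Context, cs kubernetes.Interface) error {
	drivers, err := cs.StorageV1().CSIDrivers().List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	for _, d := range drivers.Items {
		fmt.Println(d.Name)
	}
	return nil
}
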
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go
index 20905417..f8ba2454 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/csinode.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1
 
 import (
+	"context"
 	"time"
 
 	v1 "k8s.io/api/storage/v1"
@@ -37,14 +38,14 @@ type CSINodesGetter interface {
 
 // CSINodeInterface has methods to work with CSINode resources.
 type CSINodeInterface interface {
-	Create(*v1.CSINode) (*v1.CSINode, error)
-	Update(*v1.CSINode) (*v1.CSINode, error)
-	Delete(name string, options *metav1.DeleteOptions) error
-	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
-	Get(name string, options metav1.GetOptions) (*v1.CSINode, error)
-	List(opts metav1.ListOptions) (*v1.CSINodeList, error)
-	Watch(opts metav1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.CSINode, err error)
+	Create(ctx context.Context, cSINode *v1.CSINode, opts metav1.CreateOptions) (*v1.CSINode, error)
+	Update(ctx context.Context, cSINode *v1.CSINode, opts metav1.UpdateOptions) (*v1.CSINode, error)
+	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CSINode, error)
+	List(ctx context.Context, opts metav1.ListOptions) (*v1.CSINodeList, error)
+	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSINode, err error)
 	CSINodeExpansion
 }
 
@@ -61,19 +62,19 @@ func newCSINodes(c *StorageV1Client) *cSINodes {
 }
 
 // Get takes name of the cSINode, and returns the corresponding cSINode object, and an error if there is any.
-func (c *cSINodes) Get(name string, options metav1.GetOptions) (result *v1.CSINode, err error) {
+func (c *cSINodes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CSINode, err error) {
 	result = &v1.CSINode{}
 	err = c.client.Get().
 		Resource("csinodes").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of CSINodes that match those selectors.
-func (c *cSINodes) List(opts metav1.ListOptions) (result *v1.CSINodeList, err error) {
+func (c *cSINodes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CSINodeList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -83,13 +84,13 @@ func (c *cSINodes) List(opts metav1.ListOptions) (result *v1.CSINodeList, err er
 		Resource("csinodes").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested cSINodes.
-func (c *cSINodes) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+func (c *cSINodes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -99,66 +100,69 @@ func (c *cSINodes) Watch(opts metav1.ListOptions) (watch.Interface, error) {
 		Resource("csinodes").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a cSINode and creates it.  Returns the server's representation of the cSINode, and an error, if there is any.
-func (c *cSINodes) Create(cSINode *v1.CSINode) (result *v1.CSINode, err error) {
+func (c *cSINodes) Create(ctx context.Context, cSINode *v1.CSINode, opts metav1.CreateOptions) (result *v1.CSINode, err error) {
 	result = &v1.CSINode{}
 	err = c.client.Post().
 		Resource("csinodes").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(cSINode).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a cSINode and updates it. Returns the server's representation of the cSINode, and an error, if there is any.
-func (c *cSINodes) Update(cSINode *v1.CSINode) (result *v1.CSINode, err error) {
+func (c *cSINodes) Update(ctx context.Context, cSINode *v1.CSINode, opts metav1.UpdateOptions) (result *v1.CSINode, err error) {
 	result = &v1.CSINode{}
 	err = c.client.Put().
 		Resource("csinodes").
 		Name(cSINode.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(cSINode).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the cSINode and deletes it. Returns an error if one occurs.
-func (c *cSINodes) Delete(name string, options *metav1.DeleteOptions) error {
+func (c *cSINodes) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
 	return c.client.Delete().
 		Resource("csinodes").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *cSINodes) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+func (c *cSINodes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Resource("csinodes").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched cSINode.
-func (c *cSINodes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.CSINode, err error) {
+func (c *cSINodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CSINode, err error) {
 	result = &v1.CSINode{}
 	err = c.client.Patch(pt).
 		Resource("csinodes").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go
index d147620a..af811177 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go
@@ -18,6 +18,8 @@ limitations under the License.
 
 package v1
 
+type CSIDriverExpansion interface{}
+
 type CSINodeExpansion interface{}
 
 type StorageClassExpansion interface{}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go
index 822f0891..f03beae8 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go
@@ -26,6 +26,7 @@ import (
 
 type StorageV1Interface interface {
 	RESTClient() rest.Interface
+	CSIDriversGetter
 	CSINodesGetter
 	StorageClassesGetter
 	VolumeAttachmentsGetter
@@ -36,6 +37,10 @@ type StorageV1Client struct {
 	restClient rest.Interface
 }
 
+func (c *StorageV1Client) CSIDrivers() CSIDriverInterface {
+	return newCSIDrivers(c)
+}
+
 func (c *StorageV1Client) CSINodes() CSINodeInterface {
 	return newCSINodes(c)
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go
index 3f4c48f0..046ec3a1 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1
 
 import (
+	"context"
 	"time"
 
 	v1 "k8s.io/api/storage/v1"
@@ -37,14 +38,14 @@ type StorageClassesGetter interface {
 
 // StorageClassInterface has methods to work with StorageClass resources.
 type StorageClassInterface interface {
-	Create(*v1.StorageClass) (*v1.StorageClass, error)
-	Update(*v1.StorageClass) (*v1.StorageClass, error)
-	Delete(name string, options *metav1.DeleteOptions) error
-	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
-	Get(name string, options metav1.GetOptions) (*v1.StorageClass, error)
-	List(opts metav1.ListOptions) (*v1.StorageClassList, error)
-	Watch(opts metav1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.StorageClass, err error)
+	Create(ctx context.Context, storageClass *v1.StorageClass, opts metav1.CreateOptions) (*v1.StorageClass, error)
+	Update(ctx context.Context, storageClass *v1.StorageClass, opts metav1.UpdateOptions) (*v1.StorageClass, error)
+	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.StorageClass, error)
+	List(ctx context.Context, opts metav1.ListOptions) (*v1.StorageClassList, error)
+	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.StorageClass, err error)
 	StorageClassExpansion
 }
 
@@ -61,19 +62,19 @@ func newStorageClasses(c *StorageV1Client) *storageClasses {
 }
 
 // Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any.
-func (c *storageClasses) Get(name string, options metav1.GetOptions) (result *v1.StorageClass, err error) {
+func (c *storageClasses) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.StorageClass, err error) {
 	result = &v1.StorageClass{}
 	err = c.client.Get().
 		Resource("storageclasses").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of StorageClasses that match those selectors.
-func (c *storageClasses) List(opts metav1.ListOptions) (result *v1.StorageClassList, err error) {
+func (c *storageClasses) List(ctx context.Context, opts metav1.ListOptions) (result *v1.StorageClassList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -83,13 +84,13 @@ func (c *storageClasses) List(opts metav1.ListOptions) (result *v1.StorageClassL
 		Resource("storageclasses").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested storageClasses.
-func (c *storageClasses) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+func (c *storageClasses) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -99,66 +100,69 @@ func (c *storageClasses) Watch(opts metav1.ListOptions) (watch.Interface, error)
 		Resource("storageclasses").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a storageClass and creates it.  Returns the server's representation of the storageClass, and an error, if there is any.
-func (c *storageClasses) Create(storageClass *v1.StorageClass) (result *v1.StorageClass, err error) {
+func (c *storageClasses) Create(ctx context.Context, storageClass *v1.StorageClass, opts metav1.CreateOptions) (result *v1.StorageClass, err error) {
 	result = &v1.StorageClass{}
 	err = c.client.Post().
 		Resource("storageclasses").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(storageClass).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any.
-func (c *storageClasses) Update(storageClass *v1.StorageClass) (result *v1.StorageClass, err error) {
+func (c *storageClasses) Update(ctx context.Context, storageClass *v1.StorageClass, opts metav1.UpdateOptions) (result *v1.StorageClass, err error) {
 	result = &v1.StorageClass{}
 	err = c.client.Put().
 		Resource("storageclasses").
 		Name(storageClass.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(storageClass).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the storageClass and deletes it. Returns an error if one occurs.
-func (c *storageClasses) Delete(name string, options *metav1.DeleteOptions) error {
+func (c *storageClasses) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
 	return c.client.Delete().
 		Resource("storageclasses").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *storageClasses) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+func (c *storageClasses) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Resource("storageclasses").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched storageClass.
-func (c *storageClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.StorageClass, err error) {
+func (c *storageClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.StorageClass, err error) {
 	result = &v1.StorageClass{}
 	err = c.client.Patch(pt).
 		Resource("storageclasses").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go
index 0f45097b..e4162975 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1
 
 import (
+	"context"
 	"time"
 
 	v1 "k8s.io/api/storage/v1"
@@ -37,15 +38,15 @@ type VolumeAttachmentsGetter interface {
 
 // VolumeAttachmentInterface has methods to work with VolumeAttachment resources.
 type VolumeAttachmentInterface interface {
-	Create(*v1.VolumeAttachment) (*v1.VolumeAttachment, error)
-	Update(*v1.VolumeAttachment) (*v1.VolumeAttachment, error)
-	UpdateStatus(*v1.VolumeAttachment) (*v1.VolumeAttachment, error)
-	Delete(name string, options *metav1.DeleteOptions) error
-	DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
-	Get(name string, options metav1.GetOptions) (*v1.VolumeAttachment, error)
-	List(opts metav1.ListOptions) (*v1.VolumeAttachmentList, error)
-	Watch(opts metav1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.VolumeAttachment, err error)
+	Create(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.CreateOptions) (*v1.VolumeAttachment, error)
+	Update(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (*v1.VolumeAttachment, error)
+	UpdateStatus(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (*v1.VolumeAttachment, error)
+	Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+	Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.VolumeAttachment, error)
+	List(ctx context.Context, opts metav1.ListOptions) (*v1.VolumeAttachmentList, error)
+	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.VolumeAttachment, err error)
 	VolumeAttachmentExpansion
 }
 
@@ -62,19 +63,19 @@ func newVolumeAttachments(c *StorageV1Client) *volumeAttachments {
 }
 
 // Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any.
-func (c *volumeAttachments) Get(name string, options metav1.GetOptions) (result *v1.VolumeAttachment, err error) {
+func (c *volumeAttachments) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.VolumeAttachment, err error) {
 	result = &v1.VolumeAttachment{}
 	err = c.client.Get().
 		Resource("volumeattachments").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors.
-func (c *volumeAttachments) List(opts metav1.ListOptions) (result *v1.VolumeAttachmentList, err error) {
+func (c *volumeAttachments) List(ctx context.Context, opts metav1.ListOptions) (result *v1.VolumeAttachmentList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -84,13 +85,13 @@ func (c *volumeAttachments) List(opts metav1.ListOptions) (result *v1.VolumeAtta
 		Resource("volumeattachments").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested volumeAttachments.
-func (c *volumeAttachments) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+func (c *volumeAttachments) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -100,81 +101,84 @@ func (c *volumeAttachments) Watch(opts metav1.ListOptions) (watch.Interface, err
 		Resource("volumeattachments").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a volumeAttachment and creates it.  Returns the server's representation of the volumeAttachment, and an error, if there is any.
-func (c *volumeAttachments) Create(volumeAttachment *v1.VolumeAttachment) (result *v1.VolumeAttachment, err error) {
+func (c *volumeAttachments) Create(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.CreateOptions) (result *v1.VolumeAttachment, err error) {
 	result = &v1.VolumeAttachment{}
 	err = c.client.Post().
 		Resource("volumeattachments").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(volumeAttachment).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any.
-func (c *volumeAttachments) Update(volumeAttachment *v1.VolumeAttachment) (result *v1.VolumeAttachment, err error) {
+func (c *volumeAttachments) Update(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (result *v1.VolumeAttachment, err error) {
 	result = &v1.VolumeAttachment{}
 	err = c.client.Put().
 		Resource("volumeattachments").
 		Name(volumeAttachment.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(volumeAttachment).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // UpdateStatus was generated because the type contains a Status member.
 // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-
-func (c *volumeAttachments) UpdateStatus(volumeAttachment *v1.VolumeAttachment) (result *v1.VolumeAttachment, err error) {
+func (c *volumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1.VolumeAttachment, opts metav1.UpdateOptions) (result *v1.VolumeAttachment, err error) {
 	result = &v1.VolumeAttachment{}
 	err = c.client.Put().
 		Resource("volumeattachments").
 		Name(volumeAttachment.Name).
 		SubResource("status").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(volumeAttachment).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs.
-func (c *volumeAttachments) Delete(name string, options *metav1.DeleteOptions) error {
+func (c *volumeAttachments) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
 	return c.client.Delete().
 		Resource("volumeattachments").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *volumeAttachments) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+func (c *volumeAttachments) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Resource("volumeattachments").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched volumeAttachment.
-func (c *volumeAttachments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.VolumeAttachment, err error) {
+func (c *volumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.VolumeAttachment, err error) {
 	result = &v1.VolumeAttachment{}
 	err = c.client.Patch(pt).
 		Resource("volumeattachments").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
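
UpdateStatus follows the same pattern: context first, then the object, then UpdateOptions. A hedged sketch against the storage/v1 VolumeAttachment client above; markAttached and the read-modify-write flow are illustrative, not prescribed by the patch.

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// markAttached fetches a VolumeAttachment, flips its Attached flag, and writes
// the status subresource back with the new UpdateStatus signature.
func markAttached(ctx context.Context, cs kubernetes.Interface, name string) error {
	va, err := cs.StorageV1().VolumeAttachments().Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	va.Status.Attached = true
	_, err = cs.StorageV1().VolumeAttachments().UpdateStatus(ctx, va, metav1.UpdateOptions{})
	return err
}
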
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go
index 7fef94e8..9012fde9 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1alpha1
 
 import (
+	"context"
 	"time"
 
 	v1alpha1 "k8s.io/api/storage/v1alpha1"
@@ -37,15 +38,15 @@ type VolumeAttachmentsGetter interface {
 
 // VolumeAttachmentInterface has methods to work with VolumeAttachment resources.
 type VolumeAttachmentInterface interface {
-	Create(*v1alpha1.VolumeAttachment) (*v1alpha1.VolumeAttachment, error)
-	Update(*v1alpha1.VolumeAttachment) (*v1alpha1.VolumeAttachment, error)
-	UpdateStatus(*v1alpha1.VolumeAttachment) (*v1alpha1.VolumeAttachment, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1alpha1.VolumeAttachment, error)
-	List(opts v1.ListOptions) (*v1alpha1.VolumeAttachmentList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.VolumeAttachment, err error)
+	Create(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.CreateOptions) (*v1alpha1.VolumeAttachment, error)
+	Update(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (*v1alpha1.VolumeAttachment, error)
+	UpdateStatus(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (*v1alpha1.VolumeAttachment, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.VolumeAttachment, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.VolumeAttachmentList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VolumeAttachment, err error)
 	VolumeAttachmentExpansion
 }
 
@@ -62,19 +63,19 @@ func newVolumeAttachments(c *StorageV1alpha1Client) *volumeAttachments {
 }
 
 // Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any.
-func (c *volumeAttachments) Get(name string, options v1.GetOptions) (result *v1alpha1.VolumeAttachment, err error) {
+func (c *volumeAttachments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.VolumeAttachment, err error) {
 	result = &v1alpha1.VolumeAttachment{}
 	err = c.client.Get().
 		Resource("volumeattachments").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors.
-func (c *volumeAttachments) List(opts v1.ListOptions) (result *v1alpha1.VolumeAttachmentList, err error) {
+func (c *volumeAttachments) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.VolumeAttachmentList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -84,13 +85,13 @@ func (c *volumeAttachments) List(opts v1.ListOptions) (result *v1alpha1.VolumeAt
 		Resource("volumeattachments").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested volumeAttachments.
-func (c *volumeAttachments) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *volumeAttachments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -100,81 +101,84 @@ func (c *volumeAttachments) Watch(opts v1.ListOptions) (watch.Interface, error)
 		Resource("volumeattachments").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a volumeAttachment and creates it.  Returns the server's representation of the volumeAttachment, and an error, if there is any.
-func (c *volumeAttachments) Create(volumeAttachment *v1alpha1.VolumeAttachment) (result *v1alpha1.VolumeAttachment, err error) {
+func (c *volumeAttachments) Create(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.CreateOptions) (result *v1alpha1.VolumeAttachment, err error) {
 	result = &v1alpha1.VolumeAttachment{}
 	err = c.client.Post().
 		Resource("volumeattachments").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(volumeAttachment).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any.
-func (c *volumeAttachments) Update(volumeAttachment *v1alpha1.VolumeAttachment) (result *v1alpha1.VolumeAttachment, err error) {
+func (c *volumeAttachments) Update(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (result *v1alpha1.VolumeAttachment, err error) {
 	result = &v1alpha1.VolumeAttachment{}
 	err = c.client.Put().
 		Resource("volumeattachments").
 		Name(volumeAttachment.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(volumeAttachment).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // UpdateStatus was generated because the type contains a Status member.
 // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-
-func (c *volumeAttachments) UpdateStatus(volumeAttachment *v1alpha1.VolumeAttachment) (result *v1alpha1.VolumeAttachment, err error) {
+func (c *volumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1alpha1.VolumeAttachment, opts v1.UpdateOptions) (result *v1alpha1.VolumeAttachment, err error) {
 	result = &v1alpha1.VolumeAttachment{}
 	err = c.client.Put().
 		Resource("volumeattachments").
 		Name(volumeAttachment.Name).
 		SubResource("status").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(volumeAttachment).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs.
-func (c *volumeAttachments) Delete(name string, options *v1.DeleteOptions) error {
+func (c *volumeAttachments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Resource("volumeattachments").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *volumeAttachments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *volumeAttachments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Resource("volumeattachments").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched volumeAttachment.
-func (c *volumeAttachments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.VolumeAttachment, err error) {
+func (c *volumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.VolumeAttachment, err error) {
 	result = &v1alpha1.VolumeAttachment{}
 	err = c.client.Patch(pt).
 		Resource("volumeattachments").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go
index 86cf9bf1..2ad26304 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1beta1
 
 import (
+	"context"
 	"time"
 
 	v1beta1 "k8s.io/api/storage/v1beta1"
@@ -37,14 +38,14 @@ type CSIDriversGetter interface {
 
 // CSIDriverInterface has methods to work with CSIDriver resources.
 type CSIDriverInterface interface {
-	Create(*v1beta1.CSIDriver) (*v1beta1.CSIDriver, error)
-	Update(*v1beta1.CSIDriver) (*v1beta1.CSIDriver, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1beta1.CSIDriver, error)
-	List(opts v1.ListOptions) (*v1beta1.CSIDriverList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CSIDriver, err error)
+	Create(ctx context.Context, cSIDriver *v1beta1.CSIDriver, opts v1.CreateOptions) (*v1beta1.CSIDriver, error)
+	Update(ctx context.Context, cSIDriver *v1beta1.CSIDriver, opts v1.UpdateOptions) (*v1beta1.CSIDriver, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.CSIDriver, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1beta1.CSIDriverList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSIDriver, err error)
 	CSIDriverExpansion
 }
 
@@ -61,19 +62,19 @@ func newCSIDrivers(c *StorageV1beta1Client) *cSIDrivers {
 }
 
 // Get takes name of the cSIDriver, and returns the corresponding cSIDriver object, and an error if there is any.
-func (c *cSIDrivers) Get(name string, options v1.GetOptions) (result *v1beta1.CSIDriver, err error) {
+func (c *cSIDrivers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CSIDriver, err error) {
 	result = &v1beta1.CSIDriver{}
 	err = c.client.Get().
 		Resource("csidrivers").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of CSIDrivers that match those selectors.
-func (c *cSIDrivers) List(opts v1.ListOptions) (result *v1beta1.CSIDriverList, err error) {
+func (c *cSIDrivers) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CSIDriverList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -83,13 +84,13 @@ func (c *cSIDrivers) List(opts v1.ListOptions) (result *v1beta1.CSIDriverList, e
 		Resource("csidrivers").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested cSIDrivers.
-func (c *cSIDrivers) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *cSIDrivers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -99,66 +100,69 @@ func (c *cSIDrivers) Watch(opts v1.ListOptions) (watch.Interface, error) {
 		Resource("csidrivers").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a cSIDriver and creates it.  Returns the server's representation of the cSIDriver, and an error, if there is any.
-func (c *cSIDrivers) Create(cSIDriver *v1beta1.CSIDriver) (result *v1beta1.CSIDriver, err error) {
+func (c *cSIDrivers) Create(ctx context.Context, cSIDriver *v1beta1.CSIDriver, opts v1.CreateOptions) (result *v1beta1.CSIDriver, err error) {
 	result = &v1beta1.CSIDriver{}
 	err = c.client.Post().
 		Resource("csidrivers").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(cSIDriver).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a cSIDriver and updates it. Returns the server's representation of the cSIDriver, and an error, if there is any.
-func (c *cSIDrivers) Update(cSIDriver *v1beta1.CSIDriver) (result *v1beta1.CSIDriver, err error) {
+func (c *cSIDrivers) Update(ctx context.Context, cSIDriver *v1beta1.CSIDriver, opts v1.UpdateOptions) (result *v1beta1.CSIDriver, err error) {
 	result = &v1beta1.CSIDriver{}
 	err = c.client.Put().
 		Resource("csidrivers").
 		Name(cSIDriver.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(cSIDriver).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the cSIDriver and deletes it. Returns an error if one occurs.
-func (c *cSIDrivers) Delete(name string, options *v1.DeleteOptions) error {
+func (c *cSIDrivers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Resource("csidrivers").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *cSIDrivers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *cSIDrivers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Resource("csidrivers").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched cSIDriver.
-func (c *cSIDrivers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CSIDriver, err error) {
+func (c *cSIDrivers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSIDriver, err error) {
 	result = &v1beta1.CSIDriver{}
 	err = c.client.Patch(pt).
 		Resource("csidrivers").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
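
The hunk above, and the parallel hunks for CSINode, StorageClass, and VolumeAttachment that follow, all apply the same migration: every typed-client method now takes a context.Context plus explicit options, and request chains end in Do(ctx) or Watch(ctx). A minimal caller-side sketch of the new shape (the clientset wiring and the 30-second deadline are illustrative assumptions, not part of this patch):

```go
package example

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// listCSIDrivers shows the context-aware call shape: List(ctx, opts) instead
// of the old List(opts) whose request chain ended in Do() with no arguments.
func listCSIDrivers(cfg *rest.Config) ([]string, error) {
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return nil, err
	}

	// The caller owns the deadline; cancellation now flows through this
	// context rather than a Request.Context(...) setter.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	drivers, err := cs.StorageV1beta1().CSIDrivers().List(ctx, metav1.ListOptions{})
	if err != nil {
		return nil, err
	}

	names := make([]string, 0, len(drivers.Items))
	for _, d := range drivers.Items {
		names = append(names, d.Name)
	}
	return names, nil
}
```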
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go
index e5540c12..babb89ab 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1beta1
 
 import (
+	"context"
 	"time"
 
 	v1beta1 "k8s.io/api/storage/v1beta1"
@@ -37,14 +38,14 @@ type CSINodesGetter interface {
 
 // CSINodeInterface has methods to work with CSINode resources.
 type CSINodeInterface interface {
-	Create(*v1beta1.CSINode) (*v1beta1.CSINode, error)
-	Update(*v1beta1.CSINode) (*v1beta1.CSINode, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1beta1.CSINode, error)
-	List(opts v1.ListOptions) (*v1beta1.CSINodeList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CSINode, err error)
+	Create(ctx context.Context, cSINode *v1beta1.CSINode, opts v1.CreateOptions) (*v1beta1.CSINode, error)
+	Update(ctx context.Context, cSINode *v1beta1.CSINode, opts v1.UpdateOptions) (*v1beta1.CSINode, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.CSINode, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1beta1.CSINodeList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSINode, err error)
 	CSINodeExpansion
 }
 
@@ -61,19 +62,19 @@ func newCSINodes(c *StorageV1beta1Client) *cSINodes {
 }
 
 // Get takes name of the cSINode, and returns the corresponding cSINode object, and an error if there is any.
-func (c *cSINodes) Get(name string, options v1.GetOptions) (result *v1beta1.CSINode, err error) {
+func (c *cSINodes) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CSINode, err error) {
 	result = &v1beta1.CSINode{}
 	err = c.client.Get().
 		Resource("csinodes").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of CSINodes that match those selectors.
-func (c *cSINodes) List(opts v1.ListOptions) (result *v1beta1.CSINodeList, err error) {
+func (c *cSINodes) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CSINodeList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -83,13 +84,13 @@ func (c *cSINodes) List(opts v1.ListOptions) (result *v1beta1.CSINodeList, err e
 		Resource("csinodes").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested cSINodes.
-func (c *cSINodes) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *cSINodes) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -99,66 +100,69 @@ func (c *cSINodes) Watch(opts v1.ListOptions) (watch.Interface, error) {
 		Resource("csinodes").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a cSINode and creates it.  Returns the server's representation of the cSINode, and an error, if there is any.
-func (c *cSINodes) Create(cSINode *v1beta1.CSINode) (result *v1beta1.CSINode, err error) {
+func (c *cSINodes) Create(ctx context.Context, cSINode *v1beta1.CSINode, opts v1.CreateOptions) (result *v1beta1.CSINode, err error) {
 	result = &v1beta1.CSINode{}
 	err = c.client.Post().
 		Resource("csinodes").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(cSINode).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a cSINode and updates it. Returns the server's representation of the cSINode, and an error, if there is any.
-func (c *cSINodes) Update(cSINode *v1beta1.CSINode) (result *v1beta1.CSINode, err error) {
+func (c *cSINodes) Update(ctx context.Context, cSINode *v1beta1.CSINode, opts v1.UpdateOptions) (result *v1beta1.CSINode, err error) {
 	result = &v1beta1.CSINode{}
 	err = c.client.Put().
 		Resource("csinodes").
 		Name(cSINode.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(cSINode).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the cSINode and deletes it. Returns an error if one occurs.
-func (c *cSINodes) Delete(name string, options *v1.DeleteOptions) error {
+func (c *cSINodes) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Resource("csinodes").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *cSINodes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *cSINodes) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Resource("csinodes").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched cSINode.
-func (c *cSINodes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CSINode, err error) {
+func (c *cSINodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CSINode, err error) {
 	result = &v1beta1.CSINode{}
 	err = c.client.Patch(pt).
 		Resource("csinodes").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go
index 8a8f3891..d6a8da98 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1beta1
 
 import (
+	"context"
 	"time"
 
 	v1beta1 "k8s.io/api/storage/v1beta1"
@@ -37,14 +38,14 @@ type StorageClassesGetter interface {
 
 // StorageClassInterface has methods to work with StorageClass resources.
 type StorageClassInterface interface {
-	Create(*v1beta1.StorageClass) (*v1beta1.StorageClass, error)
-	Update(*v1beta1.StorageClass) (*v1beta1.StorageClass, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1beta1.StorageClass, error)
-	List(opts v1.ListOptions) (*v1beta1.StorageClassList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StorageClass, err error)
+	Create(ctx context.Context, storageClass *v1beta1.StorageClass, opts v1.CreateOptions) (*v1beta1.StorageClass, error)
+	Update(ctx context.Context, storageClass *v1beta1.StorageClass, opts v1.UpdateOptions) (*v1beta1.StorageClass, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.StorageClass, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1beta1.StorageClassList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StorageClass, err error)
 	StorageClassExpansion
 }
 
@@ -61,19 +62,19 @@ func newStorageClasses(c *StorageV1beta1Client) *storageClasses {
 }
 
 // Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any.
-func (c *storageClasses) Get(name string, options v1.GetOptions) (result *v1beta1.StorageClass, err error) {
+func (c *storageClasses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.StorageClass, err error) {
 	result = &v1beta1.StorageClass{}
 	err = c.client.Get().
 		Resource("storageclasses").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of StorageClasses that match those selectors.
-func (c *storageClasses) List(opts v1.ListOptions) (result *v1beta1.StorageClassList, err error) {
+func (c *storageClasses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.StorageClassList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -83,13 +84,13 @@ func (c *storageClasses) List(opts v1.ListOptions) (result *v1beta1.StorageClass
 		Resource("storageclasses").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested storageClasses.
-func (c *storageClasses) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *storageClasses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -99,66 +100,69 @@ func (c *storageClasses) Watch(opts v1.ListOptions) (watch.Interface, error) {
 		Resource("storageclasses").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a storageClass and creates it.  Returns the server's representation of the storageClass, and an error, if there is any.
-func (c *storageClasses) Create(storageClass *v1beta1.StorageClass) (result *v1beta1.StorageClass, err error) {
+func (c *storageClasses) Create(ctx context.Context, storageClass *v1beta1.StorageClass, opts v1.CreateOptions) (result *v1beta1.StorageClass, err error) {
 	result = &v1beta1.StorageClass{}
 	err = c.client.Post().
 		Resource("storageclasses").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(storageClass).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any.
-func (c *storageClasses) Update(storageClass *v1beta1.StorageClass) (result *v1beta1.StorageClass, err error) {
+func (c *storageClasses) Update(ctx context.Context, storageClass *v1beta1.StorageClass, opts v1.UpdateOptions) (result *v1beta1.StorageClass, err error) {
 	result = &v1beta1.StorageClass{}
 	err = c.client.Put().
 		Resource("storageclasses").
 		Name(storageClass.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(storageClass).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the storageClass and deletes it. Returns an error if one occurs.
-func (c *storageClasses) Delete(name string, options *v1.DeleteOptions) error {
+func (c *storageClasses) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Resource("storageclasses").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *storageClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *storageClasses) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Resource("storageclasses").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched storageClass.
-func (c *storageClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StorageClass, err error) {
+func (c *storageClasses) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.StorageClass, err error) {
 	result = &v1beta1.StorageClass{}
 	err = c.client.Patch(pt).
 		Resource("storageclasses").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go
index d319407f..951a5e71 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go
@@ -19,6 +19,7 @@ limitations under the License.
 package v1beta1
 
 import (
+	"context"
 	"time"
 
 	v1beta1 "k8s.io/api/storage/v1beta1"
@@ -37,15 +38,15 @@ type VolumeAttachmentsGetter interface {
 
 // VolumeAttachmentInterface has methods to work with VolumeAttachment resources.
 type VolumeAttachmentInterface interface {
-	Create(*v1beta1.VolumeAttachment) (*v1beta1.VolumeAttachment, error)
-	Update(*v1beta1.VolumeAttachment) (*v1beta1.VolumeAttachment, error)
-	UpdateStatus(*v1beta1.VolumeAttachment) (*v1beta1.VolumeAttachment, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1beta1.VolumeAttachment, error)
-	List(opts v1.ListOptions) (*v1beta1.VolumeAttachmentList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.VolumeAttachment, err error)
+	Create(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.CreateOptions) (*v1beta1.VolumeAttachment, error)
+	Update(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (*v1beta1.VolumeAttachment, error)
+	UpdateStatus(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (*v1beta1.VolumeAttachment, error)
+	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.VolumeAttachment, error)
+	List(ctx context.Context, opts v1.ListOptions) (*v1beta1.VolumeAttachmentList, error)
+	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VolumeAttachment, err error)
 	VolumeAttachmentExpansion
 }
 
@@ -62,19 +63,19 @@ func newVolumeAttachments(c *StorageV1beta1Client) *volumeAttachments {
 }
 
 // Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any.
-func (c *volumeAttachments) Get(name string, options v1.GetOptions) (result *v1beta1.VolumeAttachment, err error) {
+func (c *volumeAttachments) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.VolumeAttachment, err error) {
 	result = &v1beta1.VolumeAttachment{}
 	err = c.client.Get().
 		Resource("volumeattachments").
 		Name(name).
 		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors.
-func (c *volumeAttachments) List(opts v1.ListOptions) (result *v1beta1.VolumeAttachmentList, err error) {
+func (c *volumeAttachments) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.VolumeAttachmentList, err error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -84,13 +85,13 @@ func (c *volumeAttachments) List(opts v1.ListOptions) (result *v1beta1.VolumeAtt
 		Resource("volumeattachments").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Watch returns a watch.Interface that watches the requested volumeAttachments.
-func (c *volumeAttachments) Watch(opts v1.ListOptions) (watch.Interface, error) {
+func (c *volumeAttachments) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
 	var timeout time.Duration
 	if opts.TimeoutSeconds != nil {
 		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
@@ -100,81 +101,84 @@ func (c *volumeAttachments) Watch(opts v1.ListOptions) (watch.Interface, error)
 		Resource("volumeattachments").
 		VersionedParams(&opts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Watch()
+		Watch(ctx)
 }
 
 // Create takes the representation of a volumeAttachment and creates it.  Returns the server's representation of the volumeAttachment, and an error, if there is any.
-func (c *volumeAttachments) Create(volumeAttachment *v1beta1.VolumeAttachment) (result *v1beta1.VolumeAttachment, err error) {
+func (c *volumeAttachments) Create(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.CreateOptions) (result *v1beta1.VolumeAttachment, err error) {
 	result = &v1beta1.VolumeAttachment{}
 	err = c.client.Post().
 		Resource("volumeattachments").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(volumeAttachment).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any.
-func (c *volumeAttachments) Update(volumeAttachment *v1beta1.VolumeAttachment) (result *v1beta1.VolumeAttachment, err error) {
+func (c *volumeAttachments) Update(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (result *v1beta1.VolumeAttachment, err error) {
 	result = &v1beta1.VolumeAttachment{}
 	err = c.client.Put().
 		Resource("volumeattachments").
 		Name(volumeAttachment.Name).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(volumeAttachment).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // UpdateStatus was generated because the type contains a Status member.
 // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
-
-func (c *volumeAttachments) UpdateStatus(volumeAttachment *v1beta1.VolumeAttachment) (result *v1beta1.VolumeAttachment, err error) {
+func (c *volumeAttachments) UpdateStatus(ctx context.Context, volumeAttachment *v1beta1.VolumeAttachment, opts v1.UpdateOptions) (result *v1beta1.VolumeAttachment, err error) {
 	result = &v1beta1.VolumeAttachment{}
 	err = c.client.Put().
 		Resource("volumeattachments").
 		Name(volumeAttachment.Name).
 		SubResource("status").
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(volumeAttachment).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
 
 // Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs.
-func (c *volumeAttachments) Delete(name string, options *v1.DeleteOptions) error {
+func (c *volumeAttachments) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
 	return c.client.Delete().
 		Resource("volumeattachments").
 		Name(name).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // DeleteCollection deletes a collection of objects.
-func (c *volumeAttachments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
+func (c *volumeAttachments) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
 	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	if listOpts.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
 	}
 	return c.client.Delete().
 		Resource("volumeattachments").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
+		VersionedParams(&listOpts, scheme.ParameterCodec).
 		Timeout(timeout).
-		Body(options).
-		Do().
+		Body(&opts).
+		Do(ctx).
 		Error()
 }
 
 // Patch applies the patch and returns the patched volumeAttachment.
-func (c *volumeAttachments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.VolumeAttachment, err error) {
+func (c *volumeAttachments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.VolumeAttachment, err error) {
 	result = &v1beta1.VolumeAttachment{}
 	err = c.client.Patch(pt).
 		Resource("volumeattachments").
-		SubResource(subresources...).
 		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
 		Body(data).
-		Do().
+		Do(ctx).
 		Into(result)
 	return
 }
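
VolumeAttachment additionally carries UpdateStatus, which writes through the "status" subresource using the same context-and-options pattern. A hedged sketch (markAttached and its arguments are illustrative):

```go
package example

import (
	"context"

	storagev1beta1 "k8s.io/api/storage/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// markAttached flips the status field and persists it via the status
// subresource; the attachment object is assumed to have been fetched already.
func markAttached(ctx context.Context, cs kubernetes.Interface, attach *storagev1beta1.VolumeAttachment) (*storagev1beta1.VolumeAttachment, error) {
	attach.Status.Attached = true
	return cs.StorageV1beta1().VolumeAttachments().UpdateStatus(ctx, attach, metav1.UpdateOptions{})
}
```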
diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go
index 741729bb..71ed045a 100644
--- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go
+++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go
@@ -20,6 +20,7 @@ import (
 	"bytes"
 	"context"
 	"crypto/tls"
+	"crypto/x509"
 	"errors"
 	"fmt"
 	"io"
@@ -42,6 +43,7 @@ import (
 	"k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1"
 	"k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
 	"k8s.io/client-go/tools/clientcmd/api"
+	"k8s.io/client-go/tools/metrics"
 	"k8s.io/client-go/transport"
 	"k8s.io/client-go/util/connrotation"
 	"k8s.io/klog"
@@ -260,6 +262,7 @@ func (a *Authenticator) cert() (*tls.Certificate, error) {
 func (a *Authenticator) getCreds() (*credentials, error) {
 	a.mu.Lock()
 	defer a.mu.Unlock()
+
 	if a.cachedCreds != nil && !a.credsExpired() {
 		return a.cachedCreds, nil
 	}
@@ -267,6 +270,7 @@ func (a *Authenticator) getCreds() (*credentials, error) {
 	if err := a.refreshCredsLocked(nil); err != nil {
 		return nil, err
 	}
+
 	return a.cachedCreds, nil
 }
 
@@ -355,6 +359,17 @@ func (a *Authenticator) refreshCredsLocked(r *clientauthentication.Response) err
 		if err != nil {
 			return fmt.Errorf("failed parsing client key/certificate: %v", err)
 		}
+
+		// Leaf is initialized to be nil:
+		//  https://golang.org/pkg/crypto/tls/#X509KeyPair
+		// Leaf certificate is the first certificate:
+		//  https://golang.org/pkg/crypto/tls/#Certificate
+		// Populating leaf is useful for quickly accessing the underlying x509
+		// certificate values.
+		cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0])
+		if err != nil {
+			return fmt.Errorf("failed parsing client leaf certificate: %v", err)
+		}
 		newCreds.cert = &cert
 	}
 
@@ -362,10 +377,20 @@ func (a *Authenticator) refreshCredsLocked(r *clientauthentication.Response) err
 	a.cachedCreds = newCreds
 	// Only close all connections when TLS cert rotates. Token rotation doesn't
 	// need the extra noise.
-	if len(a.onRotateList) > 0 && oldCreds != nil && !reflect.DeepEqual(oldCreds.cert, a.cachedCreds.cert) {
+	if oldCreds != nil && !reflect.DeepEqual(oldCreds.cert, a.cachedCreds.cert) {
+		// Can be nil if the exec auth plugin only returned token auth.
+		if oldCreds.cert != nil && oldCreds.cert.Leaf != nil {
+			metrics.ClientCertRotationAge.Observe(time.Now().Sub(oldCreds.cert.Leaf.NotBefore))
+		}
 		for _, onRotate := range a.onRotateList {
 			onRotate()
 		}
 	}
+
+	expiry := time.Time{}
+	if a.cachedCreds.cert != nil && a.cachedCreds.cert.Leaf != nil {
+		expiry = a.cachedCreds.cert.Leaf.NotAfter
+	}
+	expirationMetrics.set(a, expiry)
 	return nil
 }
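
The exec credential change parses the leaf certificate immediately after tls.X509KeyPair, because the standard library leaves Certificate.Leaf nil and the rotation-age and expiry metrics need the parsed NotBefore/NotAfter fields. A standalone sketch of the same pattern (loadClientCert is illustrative, not part of the plugin):

```go
package example

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
)

// loadClientCert builds a TLS client certificate and populates Leaf so that
// x509 fields such as NotBefore and NotAfter are cheap to read later.
func loadClientCert(certPEM, keyPEM []byte) (*tls.Certificate, error) {
	cert, err := tls.X509KeyPair(certPEM, keyPEM)
	if err != nil {
		return nil, fmt.Errorf("failed parsing client key/certificate: %v", err)
	}
	// X509KeyPair leaves Leaf nil; parse the first certificate in the chain.
	cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0])
	if err != nil {
		return nil, fmt.Errorf("failed parsing client leaf certificate: %v", err)
	}
	return &cert, nil
}
```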
diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/metrics.go b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/metrics.go
new file mode 100644
index 00000000..caf0cca3
--- /dev/null
+++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/metrics.go
@@ -0,0 +1,60 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package exec
+
+import (
+	"sync"
+	"time"
+
+	"k8s.io/client-go/tools/metrics"
+)
+
+type certificateExpirationTracker struct {
+	mu        sync.RWMutex
+	m         map[*Authenticator]time.Time
+	metricSet func(*time.Time)
+}
+
+var expirationMetrics = &certificateExpirationTracker{
+	m: map[*Authenticator]time.Time{},
+	metricSet: func(e *time.Time) {
+		metrics.ClientCertExpiry.Set(e)
+	},
+}
+
+// set stores the given expiration time and updates the certificate
+// expiry metric to the earliest expiration time.
+func (c *certificateExpirationTracker) set(a *Authenticator, t time.Time) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	c.m[a] = t
+
+	earliest := time.Time{}
+	for _, t := range c.m {
+		if t.IsZero() {
+			continue
+		}
+		if earliest.IsZero() || earliest.After(t) {
+			earliest = t
+		}
+	}
+	if earliest.IsZero() {
+		c.metricSet(nil)
+	} else {
+		c.metricSet(&earliest)
+	}
+}
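
A short in-package sketch of how the tracker above behaves: with the metric sink stubbed out, the callback always receives the earliest non-zero expiration across all tracked Authenticators (the function below is illustrative and relies on the surrounding exec package for its types and the time import):

```go
// exampleTrackerBehaviour demonstrates that set() reports only the earliest
// non-zero expiry; nil would be reported once no expiries are tracked at all.
func exampleTrackerBehaviour() {
	var observed *time.Time
	tracker := &certificateExpirationTracker{
		m:         map[*Authenticator]time.Time{},
		metricSet: func(e *time.Time) { observed = e },
	}

	a1, a2 := &Authenticator{}, &Authenticator{}
	tracker.set(a1, time.Now().Add(48*time.Hour))
	tracker.set(a2, time.Now().Add(24*time.Hour))

	_ = observed // now points at a2's expiry, the earlier of the two
}
```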
diff --git a/vendor/k8s.io/client-go/rest/request.go b/vendor/k8s.io/client-go/rest/request.go
index 9e0c2611..1acd189e 100644
--- a/vendor/k8s.io/client-go/rest/request.go
+++ b/vendor/k8s.io/client-go/rest/request.go
@@ -30,6 +30,7 @@ import (
 	"reflect"
 	"strconv"
 	"strings"
+	"sync"
 	"time"
 
 	"golang.org/x/net/http2"
@@ -38,6 +39,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/runtime/serializer/streaming"
+	utilclock "k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/util/net"
 	"k8s.io/apimachinery/pkg/watch"
 	restclientwatch "k8s.io/client-go/rest/watch"
@@ -51,6 +53,9 @@ var (
 	// throttled (via the provided rateLimiter) for more than longThrottleLatency will
 	// be logged.
 	longThrottleLatency = 50 * time.Millisecond
+
+	// extraLongThrottleLatency defines the threshold for logging requests at log level 2.
+	extraLongThrottleLatency = 1 * time.Second
 )
 
 // HTTPClient is an interface for testing a request object.
@@ -61,8 +66,8 @@ type HTTPClient interface {
 // ResponseWrapper is an interface for getting a response.
 // The response may be either accessed as a raw data (the whole output is put into memory) or as a stream.
 type ResponseWrapper interface {
-	DoRaw() ([]byte, error)
-	Stream() (io.ReadCloser, error)
+	DoRaw(context.Context) ([]byte, error)
+	Stream(context.Context) (io.ReadCloser, error)
 }
 
 // RequestConstructionError is returned when there's an error assembling a request.
@@ -104,9 +109,6 @@ type Request struct {
 	// output
 	err  error
 	body io.Reader
-
-	// This is only used for per-request timeouts, deadlines, and cancellations.
-	ctx context.Context
 }
 
 // NewRequest creates a new request helper object for accessing runtime.Objects on a server.
@@ -438,13 +440,6 @@ func (r *Request) Body(obj interface{}) *Request {
 	return r
 }
 
-// Context adds a context to the request. Contexts are only used for
-// timeouts, deadlines, and cancellations.
-func (r *Request) Context(ctx context.Context) *Request {
-	r.ctx = ctx
-	return r
-}
-
 // URL returns the current working URL.
 func (r *Request) URL() *url.URL {
 	p := r.pathPrefix
@@ -548,29 +543,88 @@ func (r Request) finalURLTemplate() url.URL {
 	return *url
 }
 
-func (r *Request) tryThrottle() error {
+func (r *Request) tryThrottle(ctx context.Context) error {
 	if r.rateLimiter == nil {
 		return nil
 	}
 
 	now := time.Now()
-	var err error
-	if r.ctx != nil {
-		err = r.rateLimiter.Wait(r.ctx)
-	} else {
-		r.rateLimiter.Accept()
-	}
 
-	if latency := time.Since(now); latency > longThrottleLatency {
-		klog.V(4).Infof("Throttling request took %v, request: %s:%s", latency, r.verb, r.URL().String())
+	err := r.rateLimiter.Wait(ctx)
+
+	latency := time.Since(now)
+	if latency > longThrottleLatency {
+		klog.V(3).Infof("Throttling request took %v, request: %s:%s", latency, r.verb, r.URL().String())
 	}
+	if latency > extraLongThrottleLatency {
+		// If the rate limiter latency is very high, the log message should be printed at a higher log level,
+		// but we use a throttled logger to prevent spamming.
+		globalThrottledLogger.Infof("Throttling request took %v, request: %s:%s", latency, r.verb, r.URL().String())
+	}
+	metrics.RateLimiterLatency.Observe(r.verb, r.finalURLTemplate(), latency)
 
 	return err
 }
 
+type throttleSettings struct {
+	logLevel       klog.Level
+	minLogInterval time.Duration
+
+	lastLogTime time.Time
+	lock        sync.RWMutex
+}
+
+type throttledLogger struct {
+	clock    utilclock.PassiveClock
+	settings []*throttleSettings
+}
+
+var globalThrottledLogger = &throttledLogger{
+	clock: utilclock.RealClock{},
+	settings: []*throttleSettings{
+		{
+			logLevel:       2,
+			minLogInterval: 1 * time.Second,
+		}, {
+			logLevel:       0,
+			minLogInterval: 10 * time.Second,
+		},
+	},
+}
+
+func (b *throttledLogger) attemptToLog() (klog.Level, bool) {
+	for _, setting := range b.settings {
+		if bool(klog.V(setting.logLevel)) {
+			// Return early without write locking if possible.
+			if func() bool {
+				setting.lock.RLock()
+				defer setting.lock.RUnlock()
+				return b.clock.Since(setting.lastLogTime) >= setting.minLogInterval
+			}() {
+				setting.lock.Lock()
+				defer setting.lock.Unlock()
+				if b.clock.Since(setting.lastLogTime) >= setting.minLogInterval {
+					setting.lastLogTime = b.clock.Now()
+					return setting.logLevel, true
+				}
+			}
+			return -1, false
+		}
+	}
+	return -1, false
+}
+
+// Infof will write a log message at each logLevel specified by the receiver's throttleSettings
+// as long as it hasn't written a log message more recently than minLogInterval.
+func (b *throttledLogger) Infof(message string, args ...interface{}) {
+	if logLevel, ok := b.attemptToLog(); ok {
+		klog.V(logLevel).Infof(message, args...)
+	}
+}
+
 // Watch attempts to begin watching the requested location.
 // Returns a watch.Interface, or an error.
-func (r *Request) Watch() (watch.Interface, error) {
+func (r *Request) Watch(ctx context.Context) (watch.Interface, error) {
 	// We specifically don't want to rate limit watches, so we
 	// don't use r.rateLimiter here.
 	if r.err != nil {
@@ -582,9 +636,7 @@ func (r *Request) Watch() (watch.Interface, error) {
 	if err != nil {
 		return nil, err
 	}
-	if r.ctx != nil {
-		req = req.WithContext(r.ctx)
-	}
+	req = req.WithContext(ctx)
 	req.Header = r.headers
 	client := r.c.Client
 	if client == nil {
@@ -659,12 +711,12 @@ func updateURLMetrics(req *Request, resp *http.Response, err error) {
 // Returns io.ReadCloser which could be used for streaming of the response, or an error
 // Any non-2xx http status code causes an error.  If we get a non-2xx code, we try to convert the body into an APIStatus object.
 // If we can, we return that as an error.  Otherwise, we create an error that lists the http status and the content of the response.
-func (r *Request) Stream() (io.ReadCloser, error) {
+func (r *Request) Stream(ctx context.Context) (io.ReadCloser, error) {
 	if r.err != nil {
 		return nil, r.err
 	}
 
-	if err := r.tryThrottle(); err != nil {
+	if err := r.tryThrottle(ctx); err != nil {
 		return nil, err
 	}
 
@@ -676,9 +728,7 @@ func (r *Request) Stream() (io.ReadCloser, error) {
 	if r.body != nil {
 		req.Body = ioutil.NopCloser(r.body)
 	}
-	if r.ctx != nil {
-		req = req.WithContext(r.ctx)
-	}
+	req = req.WithContext(ctx)
 	req.Header = r.headers
 	client := r.c.Client
 	if client == nil {
@@ -746,7 +796,7 @@ func (r *Request) requestPreflightCheck() error {
 // received. It handles retry behavior and up front validation of requests. It will invoke
 // fn at most once. It will return an error if a problem occurred prior to connecting to the
 // server - the provided function is responsible for handling server errors.
-func (r *Request) request(fn func(*http.Request, *http.Response)) error {
+func (r *Request) request(ctx context.Context, fn func(*http.Request, *http.Response)) error {
 	//Metrics for total request latency
 	start := time.Now()
 	defer func() {
@@ -767,26 +817,30 @@ func (r *Request) request(fn func(*http.Request, *http.Response)) error {
 		client = http.DefaultClient
 	}
 
+	// Throttle the first try before setting up the timeout configured on the
+	// client. We don't want a throttled client to return timeouts to callers
+	// before it makes a single request.
+	if err := r.tryThrottle(ctx); err != nil {
+		return err
+	}
+
+	if r.timeout > 0 {
+		var cancel context.CancelFunc
+		ctx, cancel = context.WithTimeout(ctx, r.timeout)
+		defer cancel()
+	}
+
 	// Right now we make about ten retry attempts if we get a Retry-After response.
 	maxRetries := 10
 	retries := 0
 	for {
+
 		url := r.URL().String()
 		req, err := http.NewRequest(r.verb, url, r.body)
 		if err != nil {
 			return err
 		}
-		if r.timeout > 0 {
-			if r.ctx == nil {
-				r.ctx = context.Background()
-			}
-			var cancelFn context.CancelFunc
-			r.ctx, cancelFn = context.WithTimeout(r.ctx, r.timeout)
-			defer cancelFn()
-		}
-		if r.ctx != nil {
-			req = req.WithContext(r.ctx)
-		}
+		req = req.WithContext(ctx)
 		req.Header = r.headers
 
 		r.backoff.Sleep(r.backoff.CalculateBackoff(r.URL()))
@@ -794,7 +848,7 @@ func (r *Request) request(fn func(*http.Request, *http.Response)) error {
 			// We are retrying the request that we already send to apiserver
 			// at least once before.
 			// This request should also be throttled with the client-internal rate limiter.
-			if err := r.tryThrottle(); err != nil {
+			if err := r.tryThrottle(ctx); err != nil {
 				return err
 			}
 		}
@@ -806,19 +860,24 @@ func (r *Request) request(fn func(*http.Request, *http.Response)) error {
 			r.backoff.UpdateBackoff(r.URL(), err, resp.StatusCode)
 		}
 		if err != nil {
-			// "Connection reset by peer" is usually a transient error.
+			// "Connection reset by peer" or "apiserver is shutting down" are usually transient errors.
 			// Thus in case of "GET" operations, we simply retry it.
 			// We are not automatically retrying "write" operations, as
 			// they are not idempotent.
-			if !net.IsConnectionReset(err) || r.verb != "GET" {
+			if r.verb != "GET" {
 				return err
 			}
-			// For the purpose of retry, we set the artificial "retry-after" response.
-			// TODO: Should we clean the original response if it exists?
-			resp = &http.Response{
-				StatusCode: http.StatusInternalServerError,
-				Header:     http.Header{"Retry-After": []string{"1"}},
-				Body:       ioutil.NopCloser(bytes.NewReader([]byte{})),
+			// For connection errors and apiserver shutdown errors retry.
+			if net.IsConnectionReset(err) || net.IsProbableEOF(err) {
+				// For the purpose of retry, we set the artificial "retry-after" response.
+				// TODO: Should we clean the original response if it exists?
+				resp = &http.Response{
+					StatusCode: http.StatusInternalServerError,
+					Header:     http.Header{"Retry-After": []string{"1"}},
+					Body:       ioutil.NopCloser(bytes.NewReader([]byte{})),
+				}
+			} else {
+				return err
 			}
 		}
 
@@ -864,13 +923,9 @@ func (r *Request) request(fn func(*http.Request, *http.Response)) error {
 // Error type:
 //  * If the server responds with a status: *errors.StatusError or *errors.UnexpectedObjectError
 //  * http.Client.Do errors are returned directly.
-func (r *Request) Do() Result {
-	if err := r.tryThrottle(); err != nil {
-		return Result{err: err}
-	}
-
+func (r *Request) Do(ctx context.Context) Result {
 	var result Result
-	err := r.request(func(req *http.Request, resp *http.Response) {
+	err := r.request(ctx, func(req *http.Request, resp *http.Response) {
 		result = r.transformResponse(resp, req)
 	})
 	if err != nil {
@@ -880,13 +935,9 @@ func (r *Request) Do() Result {
 }
 
 // DoRaw executes the request but does not process the response body.
-func (r *Request) DoRaw() ([]byte, error) {
-	if err := r.tryThrottle(); err != nil {
-		return nil, err
-	}
-
+func (r *Request) DoRaw(ctx context.Context) ([]byte, error) {
 	var result Result
-	err := r.request(func(req *http.Request, resp *http.Response) {
+	err := r.request(ctx, func(req *http.Request, resp *http.Response) {
 		result.body, result.err = ioutil.ReadAll(resp.Body)
 		glogBody("Response Body", result.body)
 		if resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusPartialContent {
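
With Request.Context removed, per-request deadlines and cancellation are supplied at execution time through Do(ctx), DoRaw(ctx), Stream(ctx), and Watch(ctx). A hedged caller-side sketch (getRaw and the /healthz path are illustrative):

```go
package example

import (
	"context"
	"time"

	"k8s.io/client-go/rest"
)

// getRaw passes the context to DoRaw directly; previously the same effect
// required chaining a Context(ctx) setter before the terminal call.
func getRaw(client rest.Interface) ([]byte, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	return client.Get().
		AbsPath("/healthz").
		DoRaw(ctx)
}
```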
diff --git a/vendor/k8s.io/client-go/restmapper/category_expansion.go b/vendor/k8s.io/client-go/restmapper/category_expansion.go
new file mode 100644
index 00000000..2537a2b4
--- /dev/null
+++ b/vendor/k8s.io/client-go/restmapper/category_expansion.go
@@ -0,0 +1,119 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package restmapper
+
+import (
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/discovery"
+)
+
+// CategoryExpander maps category strings to GroupResources.
+// Categories are a classification, or 'tag', of a group of resources.
+type CategoryExpander interface {
+	Expand(category string) ([]schema.GroupResource, bool)
+}
+
+// SimpleCategoryExpander implements CategoryExpander interface
+// using a static mapping of categories to GroupResource mapping.
+type SimpleCategoryExpander struct {
+	Expansions map[string][]schema.GroupResource
+}
+
+// Expand fulfills CategoryExpander
+func (e SimpleCategoryExpander) Expand(category string) ([]schema.GroupResource, bool) {
+	ret, ok := e.Expansions[category]
+	return ret, ok
+}
+
+// discoveryCategoryExpander lets a REST client wrapper (discoveryClient) retrieve the list of APIResourceList,
+// and then convert the discovered categories into GroupResource expansions.
+type discoveryCategoryExpander struct {
+	discoveryClient discovery.DiscoveryInterface
+}
+
+// NewDiscoveryCategoryExpander returns a category expander that makes use of the "categories" fields from
+// the API, found through the discovery client. In case of any error or no category found (which likely
+// means we're at a cluster prior to categories support), fall back to the expander provided.
+func NewDiscoveryCategoryExpander(client discovery.DiscoveryInterface) CategoryExpander {
+	if client == nil {
+		panic("Please provide discovery client to shortcut expander")
+	}
+	return discoveryCategoryExpander{discoveryClient: client}
+}
+
+// Expand fulfills CategoryExpander
+func (e discoveryCategoryExpander) Expand(category string) ([]schema.GroupResource, bool) {
+	// Get all supported resources for groups and versions from the server; if no resources are found, fall back anyway.
+	apiResourceLists, _ := e.discoveryClient.ServerResources()
+	if len(apiResourceLists) == 0 {
+		return nil, false
+	}
+
+	discoveredExpansions := map[string][]schema.GroupResource{}
+	for _, apiResourceList := range apiResourceLists {
+		gv, err := schema.ParseGroupVersion(apiResourceList.GroupVersion)
+		if err != nil {
+			continue
+		}
+		// Collect GroupVersions by categories
+		for _, apiResource := range apiResourceList.APIResources {
+			if categories := apiResource.Categories; len(categories) > 0 {
+				for _, category := range categories {
+					groupResource := schema.GroupResource{
+						Group:    gv.Group,
+						Resource: apiResource.Name,
+					}
+					discoveredExpansions[category] = append(discoveredExpansions[category], groupResource)
+				}
+			}
+		}
+	}
+
+	ret, ok := discoveredExpansions[category]
+	return ret, ok
+}
+
+// UnionCategoryExpander implements CategoryExpander interface.
+// It maps given category string to union of expansions returned by all the CategoryExpanders in the list.
+type UnionCategoryExpander []CategoryExpander
+
+// Expand fulfills CategoryExpander
+func (u UnionCategoryExpander) Expand(category string) ([]schema.GroupResource, bool) {
+	ret := []schema.GroupResource{}
+	ok := false
+
+	// Expand the category for each CategoryExpander in the list and merge/combine the results.
+	for _, expansion := range u {
+		curr, currOk := expansion.Expand(category)
+
+		for _, currGR := range curr {
+			found := false
+			for _, existing := range ret {
+				if existing == currGR {
+					found = true
+					break
+				}
+			}
+			if !found {
+				ret = append(ret, currGR)
+			}
+		}
+		ok = ok || currOk
+	}
+
+	return ret, ok
+}
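
The expanders above compose: a static SimpleCategoryExpander can be unioned with the discovery-backed one, so a category such as "all" resolves against both the hardcoded entries and whatever categories the server advertises. A hedged sketch (the specific expansions are illustrative):

```go
package example

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/restmapper"
)

// newExpander combines a static mapping with discovery-driven expansion;
// UnionCategoryExpander merges the results of every member.
func newExpander(dc discovery.DiscoveryInterface) restmapper.CategoryExpander {
	static := restmapper.SimpleCategoryExpander{
		Expansions: map[string][]schema.GroupResource{
			"all": {
				{Group: "", Resource: "pods"},
				{Group: "apps", Resource: "deployments"},
			},
		},
	}
	return restmapper.UnionCategoryExpander{
		static,
		restmapper.NewDiscoveryCategoryExpander(dc),
	}
}
```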
diff --git a/vendor/k8s.io/client-go/restmapper/discovery.go b/vendor/k8s.io/client-go/restmapper/discovery.go
new file mode 100644
index 00000000..f8d7080d
--- /dev/null
+++ b/vendor/k8s.io/client-go/restmapper/discovery.go
@@ -0,0 +1,338 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package restmapper
+
+import (
+	"fmt"
+	"strings"
+	"sync"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/discovery"
+
+	"k8s.io/klog"
+)
+
+// APIGroupResources is an API group with a mapping of versions to
+// resources.
+type APIGroupResources struct {
+	Group metav1.APIGroup
+	// A mapping of version string to a slice of APIResources for
+	// that version.
+	VersionedResources map[string][]metav1.APIResource
+}
+
+// NewDiscoveryRESTMapper returns a PriorityRESTMapper based on the discovered
+// groups and resources passed in.
+func NewDiscoveryRESTMapper(groupResources []*APIGroupResources) meta.RESTMapper {
+	unionMapper := meta.MultiRESTMapper{}
+
+	var groupPriority []string
+	// /v1 is special.  It should always come first
+	resourcePriority := []schema.GroupVersionResource{{Group: "", Version: "v1", Resource: meta.AnyResource}}
+	kindPriority := []schema.GroupVersionKind{{Group: "", Version: "v1", Kind: meta.AnyKind}}
+
+	for _, group := range groupResources {
+		groupPriority = append(groupPriority, group.Group.Name)
+
+		// Make sure the preferred version comes first
+		if len(group.Group.PreferredVersion.Version) != 0 {
+			preferred := group.Group.PreferredVersion.Version
+			if _, ok := group.VersionedResources[preferred]; ok {
+				resourcePriority = append(resourcePriority, schema.GroupVersionResource{
+					Group:    group.Group.Name,
+					Version:  group.Group.PreferredVersion.Version,
+					Resource: meta.AnyResource,
+				})
+
+				kindPriority = append(kindPriority, schema.GroupVersionKind{
+					Group:   group.Group.Name,
+					Version: group.Group.PreferredVersion.Version,
+					Kind:    meta.AnyKind,
+				})
+			}
+		}
+
+		for _, discoveryVersion := range group.Group.Versions {
+			resources, ok := group.VersionedResources[discoveryVersion.Version]
+			if !ok {
+				continue
+			}
+
+			// Add non-preferred versions after the preferred version, in case there are resources that only exist in those versions
+			if discoveryVersion.Version != group.Group.PreferredVersion.Version {
+				resourcePriority = append(resourcePriority, schema.GroupVersionResource{
+					Group:    group.Group.Name,
+					Version:  discoveryVersion.Version,
+					Resource: meta.AnyResource,
+				})
+
+				kindPriority = append(kindPriority, schema.GroupVersionKind{
+					Group:   group.Group.Name,
+					Version: discoveryVersion.Version,
+					Kind:    meta.AnyKind,
+				})
+			}
+
+			gv := schema.GroupVersion{Group: group.Group.Name, Version: discoveryVersion.Version}
+			versionMapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{gv})
+
+			for _, resource := range resources {
+				scope := meta.RESTScopeNamespace
+				if !resource.Namespaced {
+					scope = meta.RESTScopeRoot
+				}
+
+				// if we have a slash, then this is a subresource and we shouldn't create mappings for those.
+				if strings.Contains(resource.Name, "/") {
+					continue
+				}
+
+				plural := gv.WithResource(resource.Name)
+				singular := gv.WithResource(resource.SingularName)
+				// this is for legacy resources and servers which don't list singular forms.  For those we must still guess.
+				if len(resource.SingularName) == 0 {
+					_, singular = meta.UnsafeGuessKindToResource(gv.WithKind(resource.Kind))
+				}
+
+				versionMapper.AddSpecific(gv.WithKind(strings.ToLower(resource.Kind)), plural, singular, scope)
+				versionMapper.AddSpecific(gv.WithKind(resource.Kind), plural, singular, scope)
+				// TODO this is producing unsafe guesses that don't actually work, but it matches previous behavior
+				versionMapper.Add(gv.WithKind(resource.Kind+"List"), scope)
+			}
+			// TODO why is this type not in discovery (at least for "v1")
+			versionMapper.Add(gv.WithKind("List"), meta.RESTScopeRoot)
+			unionMapper = append(unionMapper, versionMapper)
+		}
+	}
+
+	for _, group := range groupPriority {
+		resourcePriority = append(resourcePriority, schema.GroupVersionResource{
+			Group:    group,
+			Version:  meta.AnyVersion,
+			Resource: meta.AnyResource,
+		})
+		kindPriority = append(kindPriority, schema.GroupVersionKind{
+			Group:   group,
+			Version: meta.AnyVersion,
+			Kind:    meta.AnyKind,
+		})
+	}
+
+	return meta.PriorityRESTMapper{
+		Delegate:         unionMapper,
+		ResourcePriority: resourcePriority,
+		KindPriority:     kindPriority,
+	}
+}
+
+// GetAPIGroupResources uses the provided discovery client to gather
+// discovery information and populate a slice of APIGroupResources.
+func GetAPIGroupResources(cl discovery.DiscoveryInterface) ([]*APIGroupResources, error) {
+	gs, rs, err := cl.ServerGroupsAndResources()
+	if rs == nil || gs == nil {
+		return nil, err
+		// TODO track the errors and update callers to handle partial errors.
+	}
+	rsm := map[string]*metav1.APIResourceList{}
+	for _, r := range rs {
+		rsm[r.GroupVersion] = r
+	}
+
+	var result []*APIGroupResources
+	for _, group := range gs {
+		groupResources := &APIGroupResources{
+			Group:              *group,
+			VersionedResources: make(map[string][]metav1.APIResource),
+		}
+		for _, version := range group.Versions {
+			resources, ok := rsm[version.GroupVersion]
+			if !ok {
+				continue
+			}
+			groupResources.VersionedResources[version.Version] = resources.APIResources
+		}
+		result = append(result, groupResources)
+	}
+	return result, nil
+}
+
+// DeferredDiscoveryRESTMapper is a RESTMapper that will defer
+// initialization of the RESTMapper until the first mapping is
+// requested.
+type DeferredDiscoveryRESTMapper struct {
+	initMu   sync.Mutex
+	delegate meta.RESTMapper
+	cl       discovery.CachedDiscoveryInterface
+}
+
+// NewDeferredDiscoveryRESTMapper returns a
+// DeferredDiscoveryRESTMapper that will lazily query the provided
+// client for discovery information to do REST mappings.
+func NewDeferredDiscoveryRESTMapper(cl discovery.CachedDiscoveryInterface) *DeferredDiscoveryRESTMapper {
+	return &DeferredDiscoveryRESTMapper{
+		cl: cl,
+	}
+}
+
+func (d *DeferredDiscoveryRESTMapper) getDelegate() (meta.RESTMapper, error) {
+	d.initMu.Lock()
+	defer d.initMu.Unlock()
+
+	if d.delegate != nil {
+		return d.delegate, nil
+	}
+
+	groupResources, err := GetAPIGroupResources(d.cl)
+	if err != nil {
+		return nil, err
+	}
+
+	d.delegate = NewDiscoveryRESTMapper(groupResources)
+	return d.delegate, err
+}
+
+// Reset resets the internally cached Discovery information and will
+// cause the next mapping request to re-discover.
+func (d *DeferredDiscoveryRESTMapper) Reset() {
+	klog.V(5).Info("Invalidating discovery information")
+
+	d.initMu.Lock()
+	defer d.initMu.Unlock()
+
+	d.cl.Invalidate()
+	d.delegate = nil
+}
+
+// KindFor takes a partial resource and returns back the single match.
+// It returns an error if there are multiple matches.
+func (d *DeferredDiscoveryRESTMapper) KindFor(resource schema.GroupVersionResource) (gvk schema.GroupVersionKind, err error) {
+	del, err := d.getDelegate()
+	if err != nil {
+		return schema.GroupVersionKind{}, err
+	}
+	gvk, err = del.KindFor(resource)
+	if err != nil && !d.cl.Fresh() {
+		d.Reset()
+		gvk, err = d.KindFor(resource)
+	}
+	return
+}
+
+// KindsFor takes a partial resource and returns back the list of
+// potential kinds in priority order.
+func (d *DeferredDiscoveryRESTMapper) KindsFor(resource schema.GroupVersionResource) (gvks []schema.GroupVersionKind, err error) {
+	del, err := d.getDelegate()
+	if err != nil {
+		return nil, err
+	}
+	gvks, err = del.KindsFor(resource)
+	if len(gvks) == 0 && !d.cl.Fresh() {
+		d.Reset()
+		gvks, err = d.KindsFor(resource)
+	}
+	return
+}
+
+// ResourceFor takes a partial resource and returns back the single
+// match. It returns an error if there are multiple matches.
+func (d *DeferredDiscoveryRESTMapper) ResourceFor(input schema.GroupVersionResource) (gvr schema.GroupVersionResource, err error) {
+	del, err := d.getDelegate()
+	if err != nil {
+		return schema.GroupVersionResource{}, err
+	}
+	gvr, err = del.ResourceFor(input)
+	if err != nil && !d.cl.Fresh() {
+		d.Reset()
+		gvr, err = d.ResourceFor(input)
+	}
+	return
+}
+
+// ResourcesFor takes a partial resource and returns back the list of
+// potential resource in priority order.
+func (d *DeferredDiscoveryRESTMapper) ResourcesFor(input schema.GroupVersionResource) (gvrs []schema.GroupVersionResource, err error) {
+	del, err := d.getDelegate()
+	if err != nil {
+		return nil, err
+	}
+	gvrs, err = del.ResourcesFor(input)
+	if len(gvrs) == 0 && !d.cl.Fresh() {
+		d.Reset()
+		gvrs, err = d.ResourcesFor(input)
+	}
+	return
+}
+
+// RESTMapping identifies a preferred resource mapping for the
+// provided group kind.
+func (d *DeferredDiscoveryRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (m *meta.RESTMapping, err error) {
+	del, err := d.getDelegate()
+	if err != nil {
+		return nil, err
+	}
+	m, err = del.RESTMapping(gk, versions...)
+	if err != nil && !d.cl.Fresh() {
+		d.Reset()
+		m, err = d.RESTMapping(gk, versions...)
+	}
+	return
+}
+
+// RESTMappings returns the RESTMappings for the provided group kind
+// in a rough internal preferred order. If no kind is found, it will
+// return a NoResourceMatchError.
+func (d *DeferredDiscoveryRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) (ms []*meta.RESTMapping, err error) {
+	del, err := d.getDelegate()
+	if err != nil {
+		return nil, err
+	}
+	ms, err = del.RESTMappings(gk, versions...)
+	if len(ms) == 0 && !d.cl.Fresh() {
+		d.Reset()
+		ms, err = d.RESTMappings(gk, versions...)
+	}
+	return
+}
+
+// ResourceSingularizer converts a resource name from plural to
+// singular (e.g., from pods to pod).
+func (d *DeferredDiscoveryRESTMapper) ResourceSingularizer(resource string) (singular string, err error) {
+	del, err := d.getDelegate()
+	if err != nil {
+		return resource, err
+	}
+	singular, err = del.ResourceSingularizer(resource)
+	if err != nil && !d.cl.Fresh() {
+		d.Reset()
+		singular, err = d.ResourceSingularizer(resource)
+	}
+	return
+}
+
+func (d *DeferredDiscoveryRESTMapper) String() string {
+	del, err := d.getDelegate()
+	if err != nil {
+		return fmt.Sprintf("DeferredDiscoveryRESTMapper{%v}", err)
+	}
+	return fmt.Sprintf("DeferredDiscoveryRESTMapper{\n\t%v\n}", del)
+}
+
+// Make sure it satisfies the interface
+var _ meta.RESTMapper = &DeferredDiscoveryRESTMapper{}
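
A hedged usage sketch for the deferred mapper: it wants a CachedDiscoveryInterface so Reset() has a cache to invalidate; memory.NewMemCacheClient from k8s.io/client-go/discovery/cached/memory is assumed here as one such implementation, and the apps/Deployment lookup is illustrative:

```go
package example

import (
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/discovery/cached/memory"
	"k8s.io/client-go/restmapper"
)

// mapDeployment resolves a GroupKind lazily: discovery is only queried on the
// first mapping request and re-fetched after a Reset().
func mapDeployment(dc discovery.DiscoveryInterface) (*meta.RESTMapping, error) {
	cached := memory.NewMemCacheClient(dc)
	mapper := restmapper.NewDeferredDiscoveryRESTMapper(cached)
	return mapper.RESTMapping(schema.GroupKind{Group: "apps", Kind: "Deployment"}, "v1")
}
```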
diff --git a/vendor/k8s.io/client-go/restmapper/shortcut.go b/vendor/k8s.io/client-go/restmapper/shortcut.go
new file mode 100644
index 00000000..6f3c9d93
--- /dev/null
+++ b/vendor/k8s.io/client-go/restmapper/shortcut.go
@@ -0,0 +1,172 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package restmapper
+
+import (
+	"strings"
+
+	"k8s.io/klog"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/discovery"
+)
+
+// shortcutExpander is a RESTMapper that can be used for Kubernetes resources. It expands the resource first, then invokes the wrapped RESTMapper.
+type shortcutExpander struct {
+	RESTMapper meta.RESTMapper
+
+	discoveryClient discovery.DiscoveryInterface
+}
+
+var _ meta.RESTMapper = &shortcutExpander{}
+
+// NewShortcutExpander wraps a restmapper in a layer that expands shortcuts found via discovery
+func NewShortcutExpander(delegate meta.RESTMapper, client discovery.DiscoveryInterface) meta.RESTMapper {
+	return shortcutExpander{RESTMapper: delegate, discoveryClient: client}
+}
+
+// KindFor fulfills meta.RESTMapper
+func (e shortcutExpander) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) {
+	return e.RESTMapper.KindFor(e.expandResourceShortcut(resource))
+}
+
+// KindsFor fulfills meta.RESTMapper
+func (e shortcutExpander) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) {
+	return e.RESTMapper.KindsFor(e.expandResourceShortcut(resource))
+}
+
+// ResourcesFor fulfills meta.RESTMapper
+func (e shortcutExpander) ResourcesFor(resource schema.GroupVersionResource) ([]schema.GroupVersionResource, error) {
+	return e.RESTMapper.ResourcesFor(e.expandResourceShortcut(resource))
+}
+
+// ResourceFor fulfills meta.RESTMapper
+func (e shortcutExpander) ResourceFor(resource schema.GroupVersionResource) (schema.GroupVersionResource, error) {
+	return e.RESTMapper.ResourceFor(e.expandResourceShortcut(resource))
+}
+
+// ResourceSingularizer fulfills meta.RESTMapper
+func (e shortcutExpander) ResourceSingularizer(resource string) (string, error) {
+	return e.RESTMapper.ResourceSingularizer(e.expandResourceShortcut(schema.GroupVersionResource{Resource: resource}).Resource)
+}
+
+// RESTMapping fulfills meta.RESTMapper
+func (e shortcutExpander) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) {
+	return e.RESTMapper.RESTMapping(gk, versions...)
+}
+
+// RESTMappings fulfills meta.RESTMapper
+func (e shortcutExpander) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) {
+	return e.RESTMapper.RESTMappings(gk, versions...)
+}
+
+// getShortcutMappings returns a set of tuples which holds short names for resources.
+// First the list of potential resources will be taken from the API server.
+// Next we will append the hardcoded list of resources - to be backward compatible with old servers.
+// NOTE that the list is ordered by group priority.
+func (e shortcutExpander) getShortcutMappings() ([]*metav1.APIResourceList, []resourceShortcuts, error) {
+	res := []resourceShortcuts{}
+	// get server resources
+	// This can return an error *and* the results it was able to find.  We don't need to fail on the error.
+	apiResList, err := e.discoveryClient.ServerResources()
+	if err != nil {
+		klog.V(1).Infof("Error loading discovery information: %v", err)
+	}
+	for _, apiResources := range apiResList {
+		gv, err := schema.ParseGroupVersion(apiResources.GroupVersion)
+		if err != nil {
+			klog.V(1).Infof("Unable to parse groupversion = %s due to = %s", apiResources.GroupVersion, err.Error())
+			continue
+		}
+		for _, apiRes := range apiResources.APIResources {
+			for _, shortName := range apiRes.ShortNames {
+				rs := resourceShortcuts{
+					ShortForm: schema.GroupResource{Group: gv.Group, Resource: shortName},
+					LongForm:  schema.GroupResource{Group: gv.Group, Resource: apiRes.Name},
+				}
+				res = append(res, rs)
+			}
+		}
+	}
+
+	return apiResList, res, nil
+}
+
+// expandResourceShortcut will return the expanded version of resource
+// (something that a pkg/api/meta.RESTMapper can understand), if it is
+// indeed a shortcut. If no match has been found, we will match on group prefixing.
+// Lastly we will return resource unmodified.
+func (e shortcutExpander) expandResourceShortcut(resource schema.GroupVersionResource) schema.GroupVersionResource {
+	// get the shortcut mappings and return on first match.
+	if allResources, shortcutResources, err := e.getShortcutMappings(); err == nil {
+		// avoid expanding if there's an exact match to a full resource name
+		for _, apiResources := range allResources {
+			gv, err := schema.ParseGroupVersion(apiResources.GroupVersion)
+			if err != nil {
+				continue
+			}
+			if len(resource.Group) != 0 && resource.Group != gv.Group {
+				continue
+			}
+			for _, apiRes := range apiResources.APIResources {
+				if resource.Resource == apiRes.Name {
+					return resource
+				}
+				if resource.Resource == apiRes.SingularName {
+					return resource
+				}
+			}
+		}
+
+		for _, item := range shortcutResources {
+			if len(resource.Group) != 0 && resource.Group != item.ShortForm.Group {
+				continue
+			}
+			if resource.Resource == item.ShortForm.Resource {
+				resource.Resource = item.LongForm.Resource
+				resource.Group = item.LongForm.Group
+				return resource
+			}
+		}
+
+		// we didn't find an exact match, so match on group prefixing. This allows a group prefix like "autoscal" to match "autoscaling".
+		if len(resource.Group) == 0 {
+			return resource
+		}
+		for _, item := range shortcutResources {
+			if !strings.HasPrefix(item.ShortForm.Group, resource.Group) {
+				continue
+			}
+			if resource.Resource == item.ShortForm.Resource {
+				resource.Resource = item.LongForm.Resource
+				resource.Group = item.LongForm.Group
+				return resource
+			}
+		}
+	}
+
+	return resource
+}
+
+// resourceShortcuts represents a structure that holds the information on how to
+// transition from a resource's shortcut to its full name.
+type resourceShortcuts struct {
+	ShortForm schema.GroupResource
+	LongForm  schema.GroupResource
+}
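The expander above consults the ShortNames advertised by discovery, so short forms such as "deploy" or "po" resolve to their full resource names before the wrapped mapper is invoked. A small helper sketch, assuming a discovery client and RESTMapper built as in the previous snippet:

import (
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/restmapper"
)

// expandShortName resolves a short resource name such as "deploy" to its
// GroupVersionKind by wrapping an existing mapper in a shortcut expander.
// dc and mapper are assumed to be built as in the previous sketch.
func expandShortName(dc discovery.DiscoveryInterface, mapper meta.RESTMapper, short string) (schema.GroupVersionKind, error) {
	expander := restmapper.NewShortcutExpander(mapper, dc)
	return expander.KindFor(schema.GroupVersionResource{Resource: short})
}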
diff --git a/vendor/k8s.io/client-go/scale/client.go b/vendor/k8s.io/client-go/scale/client.go
new file mode 100644
index 00000000..1306b37d
--- /dev/null
+++ b/vendor/k8s.io/client-go/scale/client.go
@@ -0,0 +1,238 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package scale
+
+import (
+	"context"
+	"fmt"
+
+	autoscaling "k8s.io/api/autoscaling/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/client-go/dynamic"
+	restclient "k8s.io/client-go/rest"
+)
+
+var scaleConverter = NewScaleConverter()
+var codecs = serializer.NewCodecFactory(scaleConverter.Scheme())
+var parameterScheme = runtime.NewScheme()
+var dynamicParameterCodec = runtime.NewParameterCodec(parameterScheme)
+
+var versionV1 = schema.GroupVersion{Version: "v1"}
+
+func init() {
+	metav1.AddToGroupVersion(parameterScheme, versionV1)
+}
+
+// scaleClient is an implementation of ScalesGetter
+// which makes use of a RESTMapper and a generic REST
+// client to support any discoverable resource.
+// It behaves somewhat similarly to the dynamic ClientPool,
+// but is more specifically scoped to Scale.
+type scaleClient struct {
+	mapper PreferredResourceMapper
+
+	apiPathResolverFunc dynamic.APIPathResolverFunc
+	scaleKindResolver   ScaleKindResolver
+	clientBase          restclient.Interface
+}
+
+// NewForConfig creates a new ScalesGetter which resolves kinds
+// to resources using the given RESTMapper, and API paths using
+// the given dynamic.APIPathResolverFunc.
+func NewForConfig(cfg *restclient.Config, mapper PreferredResourceMapper, resolver dynamic.APIPathResolverFunc, scaleKindResolver ScaleKindResolver) (ScalesGetter, error) {
+	// so that the RESTClientFor doesn't complain
+	cfg.GroupVersion = &schema.GroupVersion{}
+
+	cfg.NegotiatedSerializer = codecs.WithoutConversion()
+	if len(cfg.UserAgent) == 0 {
+		cfg.UserAgent = restclient.DefaultKubernetesUserAgent()
+	}
+
+	client, err := restclient.RESTClientFor(cfg)
+	if err != nil {
+		return nil, err
+	}
+
+	return New(client, mapper, resolver, scaleKindResolver), nil
+}
+
+// New creates a new ScalesGetter using the given client to make requests.
+// The GroupVersion on the client is ignored.
+func New(baseClient restclient.Interface, mapper PreferredResourceMapper, resolver dynamic.APIPathResolverFunc, scaleKindResolver ScaleKindResolver) ScalesGetter {
+	return &scaleClient{
+		mapper: mapper,
+
+		apiPathResolverFunc: resolver,
+		scaleKindResolver:   scaleKindResolver,
+		clientBase:          baseClient,
+	}
+}
+
+// apiPathFor returns the absolute API path for the given GroupVersion.
+func (c *scaleClient) apiPathFor(groupVer schema.GroupVersion) string {
+	// we need to set the API path based on GroupVersion (defaulting to the legacy path if none is set)
+	// TODO: we "cheat" here since the API path really only depends on group ATM, but this should
+	// *probably* take GroupVersionResource and not GroupVersionKind.
+	apiPath := c.apiPathResolverFunc(groupVer.WithKind(""))
+	if apiPath == "" {
+		apiPath = "/api"
+	}
+
+	return restclient.DefaultVersionedAPIPath(apiPath, groupVer)
+}
+
+// pathAndVersionFor returns the appropriate base path and the associated full GroupVersionResource
+// for the given GroupResource
+func (c *scaleClient) pathAndVersionFor(resource schema.GroupResource) (string, schema.GroupVersionResource, error) {
+	gvr, err := c.mapper.ResourceFor(resource.WithVersion(""))
+	if err != nil {
+		return "", gvr, fmt.Errorf("unable to get full preferred group-version-resource for %s: %v", resource.String(), err)
+	}
+
+	groupVer := gvr.GroupVersion()
+
+	return c.apiPathFor(groupVer), gvr, nil
+}
+
+// namespacedScaleClient is a ScaleInterface for fetching
+// Scales in a given namespace.
+type namespacedScaleClient struct {
+	client    *scaleClient
+	namespace string
+}
+
+// convertToScale converts the response body to autoscaling/v1.Scale
+func convertToScale(result *restclient.Result) (*autoscaling.Scale, error) {
+	scaleBytes, err := result.Raw()
+	if err != nil {
+		return nil, err
+	}
+	decoder := scaleConverter.codecs.UniversalDecoder(scaleConverter.ScaleVersions()...)
+	rawScaleObj, err := runtime.Decode(decoder, scaleBytes)
+	if err != nil {
+		return nil, err
+	}
+
+	// convert whatever this is to autoscaling/v1.Scale
+	scaleObj, err := scaleConverter.ConvertToVersion(rawScaleObj, autoscaling.SchemeGroupVersion)
+	if err != nil {
+		return nil, fmt.Errorf("received an object from a /scale endpoint which was not convertible to autoscaling Scale: %v", err)
+	}
+
+	return scaleObj.(*autoscaling.Scale), nil
+}
+
+func (c *scaleClient) Scales(namespace string) ScaleInterface {
+	return &namespacedScaleClient{
+		client:    c,
+		namespace: namespace,
+	}
+}
+
+func (c *namespacedScaleClient) Get(ctx context.Context, resource schema.GroupResource, name string, opts metav1.GetOptions) (*autoscaling.Scale, error) {
+	// Currently, a /scale endpoint can return different scale types.
+	// Until we have support for the alternative API representations proposal,
+	// we need to deal with accepting different API versions.
+	// In practice, this is autoscaling/v1.Scale and extensions/v1beta1.Scale
+
+	path, gvr, err := c.client.pathAndVersionFor(resource)
+	if err != nil {
+		return nil, fmt.Errorf("unable to get client for %s: %v", resource.String(), err)
+	}
+
+	result := c.client.clientBase.Get().
+		AbsPath(path).
+		NamespaceIfScoped(c.namespace, c.namespace != "").
+		Resource(gvr.Resource).
+		Name(name).
+		SubResource("scale").
+		SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).
+		Do(ctx)
+	if err := result.Error(); err != nil {
+		return nil, err
+	}
+
+	return convertToScale(&result)
+}
+
+func (c *namespacedScaleClient) Update(ctx context.Context, resource schema.GroupResource, scale *autoscaling.Scale, opts metav1.UpdateOptions) (*autoscaling.Scale, error) {
+	path, gvr, err := c.client.pathAndVersionFor(resource)
+	if err != nil {
+		return nil, fmt.Errorf("unable to get client for %s: %v", resource.String(), err)
+	}
+
+	// Currently, a /scale endpoint can receive and return different scale types.
+	// Until we have support for the alternative API representations proposal,
+	// we need to deal with sending and accepting different API versions.
+
+	// figure out what scale we actually need here
+	desiredGVK, err := c.client.scaleKindResolver.ScaleForResource(gvr)
+	if err != nil {
+		return nil, fmt.Errorf("could not find proper group-version for scale subresource of %s: %v", gvr.String(), err)
+	}
+
+	// convert this to whatever this endpoint wants
+	scaleUpdate, err := scaleConverter.ConvertToVersion(scale, desiredGVK.GroupVersion())
+	if err != nil {
+		return nil, fmt.Errorf("could not convert scale update to external Scale: %v", err)
+	}
+	encoder := scaleConverter.codecs.LegacyCodec(desiredGVK.GroupVersion())
+	scaleUpdateBytes, err := runtime.Encode(encoder, scaleUpdate)
+	if err != nil {
+		return nil, fmt.Errorf("could not encode scale update to external Scale: %v", err)
+	}
+
+	result := c.client.clientBase.Put().
+		AbsPath(path).
+		NamespaceIfScoped(c.namespace, c.namespace != "").
+		Resource(gvr.Resource).
+		Name(scale.Name).
+		SubResource("scale").
+		SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).
+		Body(scaleUpdateBytes).
+		Do(ctx)
+	if err := result.Error(); err != nil {
+		// propagate "raw" error from the API
+		// this allows callers to interpret underlying Reason field
+		// for example: errors.IsConflict(err)
+		return nil, err
+	}
+
+	return convertToScale(&result)
+}
+
+func (c *namespacedScaleClient) Patch(ctx context.Context, gvr schema.GroupVersionResource, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions) (*autoscaling.Scale, error) {
+	groupVersion := gvr.GroupVersion()
+	result := c.client.clientBase.Patch(pt).
+		AbsPath(c.client.apiPathFor(groupVersion)).
+		NamespaceIfScoped(c.namespace, c.namespace != "").
+		Resource(gvr.Resource).
+		Name(name).
+		SubResource("scale").
+		SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).
+		Body(data).
+		Do(ctx)
+	if err := result.Error(); err != nil {
+		return nil, err
+	}
+
+	return convertToScale(&result)
+}
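A typical caller wires this client up from discovery: the RESTMapper resolves the group-resource, the ScaleKindResolver picks the scale GroupVersionKind the endpoint speaks, and the legacy API path resolver chooses /api versus /apis. A hedged end-to-end sketch; the namespace and the Deployment name "web" are hypothetical:

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/discovery/cached/memory"
	"k8s.io/client-go/dynamic"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/restmapper"
	"k8s.io/client-go/scale"
)

// scaleDeployment sets the replica count of a Deployment through its /scale
// subresource. cfg is assumed to be a populated rest.Config.
func scaleDeployment(ctx context.Context, cfg *restclient.Config, replicas int32) error {
	dc, err := discovery.NewDiscoveryClientForConfig(cfg)
	if err != nil {
		return err
	}
	mapper := restmapper.NewDeferredDiscoveryRESTMapper(memory.NewMemCacheClient(dc))
	resolver := scale.NewDiscoveryScaleKindResolver(dc)

	client, err := scale.NewForConfig(cfg, mapper, dynamic.LegacyAPIPathResolverFunc, resolver)
	if err != nil {
		return err
	}

	gr := schema.GroupResource{Group: "apps", Resource: "deployments"}
	s, err := client.Scales("default").Get(ctx, gr, "web", metav1.GetOptions{})
	if err != nil {
		return err
	}
	s.Spec.Replicas = replicas
	_, err = client.Scales("default").Update(ctx, gr, s, metav1.UpdateOptions{})
	return err
}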
diff --git a/vendor/k8s.io/client-go/scale/doc.go b/vendor/k8s.io/client-go/scale/doc.go
new file mode 100644
index 00000000..b6fa3f5f
--- /dev/null
+++ b/vendor/k8s.io/client-go/scale/doc.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package scale provides a polymorphic scale client capable of fetching
+// and updating Scale for any resource which implements the `scale` subresource,
+// as long as that subresource operates on a version of scale convertible to
+// autoscaling.Scale.
+package scale // import "k8s.io/client-go/scale"
diff --git a/vendor/k8s.io/client-go/scale/interfaces.go b/vendor/k8s.io/client-go/scale/interfaces.go
new file mode 100644
index 00000000..a7bb3e6c
--- /dev/null
+++ b/vendor/k8s.io/client-go/scale/interfaces.go
@@ -0,0 +1,47 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package scale
+
+import (
+	"context"
+
+	autoscalingapi "k8s.io/api/autoscaling/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/types"
+)
+
+// ScalesGetter can produce a ScaleInterface
+type ScalesGetter interface {
+	// Scales produces a ScaleInterface for a particular namespace.
+	// Set namespace to the empty string for non-namespaced resources.
+	Scales(namespace string) ScaleInterface
+}
+
+// ScaleInterface can fetch and update scales for
+// resources in a particular namespace which implement
+// the scale subresource.
+type ScaleInterface interface {
+	// Get fetches the scale of the given scalable resource.
+	Get(ctx context.Context, resource schema.GroupResource, name string, opts metav1.GetOptions) (*autoscalingapi.Scale, error)
+
+	// Update updates the scale of the given scalable resource.
+	Update(ctx context.Context, resource schema.GroupResource, scale *autoscalingapi.Scale, opts metav1.UpdateOptions) (*autoscalingapi.Scale, error)
+
+	// Patch patches the scale of the given scalable resource.
+	Patch(ctx context.Context, gvr schema.GroupVersionResource, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions) (*autoscalingapi.Scale, error)
+}
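Get and Update go through the RESTMapper, while Patch takes a fully-qualified GroupVersionResource and sends the patch body as-is. A short sketch of the patch path, with an illustrative merge-patch body:

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/scale"
)

// patchReplicas sets spec.replicas on the scale subresource with a merge patch.
func patchReplicas(ctx context.Context, scales scale.ScalesGetter, namespace, name string) error {
	gvr := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}
	patch := []byte(`{"spec":{"replicas":3}}`)
	_, err := scales.Scales(namespace).Patch(ctx, gvr, name, types.MergePatchType, patch, metav1.PatchOptions{})
	return err
}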
diff --git a/vendor/k8s.io/client-go/scale/scheme/appsint/doc.go b/vendor/k8s.io/client-go/scale/scheme/appsint/doc.go
new file mode 100644
index 00000000..16f29e2a
--- /dev/null
+++ b/vendor/k8s.io/client-go/scale/scheme/appsint/doc.go
@@ -0,0 +1,22 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package appsint contains the necessary scaffolding of the
+// internal version of apps as required by conversion logic.
+// It doesn't have any of its own types -- it's just necessary to
+// get the expected behavior out of runtime.Scheme.ConvertToVersion
+// and associated methods.
+package appsint
diff --git a/vendor/k8s.io/client-go/scale/scheme/appsint/register.go b/vendor/k8s.io/client-go/scale/scheme/appsint/register.go
new file mode 100644
index 00000000..d3a76b51
--- /dev/null
+++ b/vendor/k8s.io/client-go/scale/scheme/appsint/register.go
@@ -0,0 +1,55 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package appsint
+
+import (
+	appsv1beta2 "k8s.io/api/apps/v1beta2"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	scalescheme "k8s.io/client-go/scale/scheme"
+)
+
+// GroupName is the group name used in this package
+const GroupName = appsv1beta2.GroupName
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
+
+// Kind takes an unqualified kind and returns a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+	return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	// SchemeBuilder points to a list of functions added to Scheme.
+	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+	// AddToScheme applies all the stored functions to the scheme.
+	AddToScheme = SchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&scalescheme.Scale{},
+	)
+	return nil
+}
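This internal-version scaffolding is what lets runtime.Scheme.ConvertToVersion hop between the per-group external Scale types via a shared internal representation. A rough sketch of such a round trip using the scale package's exported converter; the exact conversion path shown here is an assumption based on how the client code above uses ConvertToVersion:

import (
	appsv1beta2 "k8s.io/api/apps/v1beta2"
	autoscalingv1 "k8s.io/api/autoscaling/v1"
	"k8s.io/client-go/scale"
)

// toAutoscalingScale converts an apps/v1beta2 Scale into the autoscaling/v1
// representation by way of the internal scheme.Scale type.
func toAutoscalingScale(in *appsv1beta2.Scale) (*autoscalingv1.Scale, error) {
	converter := scale.NewScaleConverter()
	out, err := converter.ConvertToVersion(in, autoscalingv1.SchemeGroupVersion)
	if err != nil {
		return nil, err
	}
	return out.(*autoscalingv1.Scale), nil
}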
diff --git a/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/conversion.go b/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/conversion.go
new file mode 100644
index 00000000..f271c825
--- /dev/null
+++ b/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/conversion.go
@@ -0,0 +1,73 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package appsv1beta1
+
+import (
+	"fmt"
+
+	v1beta1 "k8s.io/api/apps/v1beta1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/conversion"
+	scheme "k8s.io/client-go/scale/scheme"
+)
+
+func Convert_scheme_ScaleStatus_To_v1beta1_ScaleStatus(in *scheme.ScaleStatus, out *v1beta1.ScaleStatus, s conversion.Scope) error {
+	out.Replicas = in.Replicas
+	out.Selector = nil
+	out.TargetSelector = ""
+	if in.Selector != nil {
+		if in.Selector.MatchExpressions == nil || len(in.Selector.MatchExpressions) == 0 {
+			out.Selector = in.Selector.MatchLabels
+		}
+
+		selector, err := metav1.LabelSelectorAsSelector(in.Selector)
+		if err != nil {
+			return fmt.Errorf("invalid label selector: %v", err)
+		}
+		out.TargetSelector = selector.String()
+	}
+
+	return nil
+}
+
+func Convert_v1beta1_ScaleStatus_To_scheme_ScaleStatus(in *v1beta1.ScaleStatus, out *scheme.ScaleStatus, s conversion.Scope) error {
+	out.Replicas = in.Replicas
+
+	// Normally when 2 fields map to the same internal value we favor the old field, since
+	// old clients can't be expected to know about new fields but clients that know about the
+	// new field can be expected to know about the old field (though that's not quite true, due
+	// to kubectl apply). However, these fields are readonly, so any non-nil value should work.
+	if in.TargetSelector != "" {
+		labelSelector, err := metav1.ParseToLabelSelector(in.TargetSelector)
+		if err != nil {
+			out.Selector = nil
+			return fmt.Errorf("failed to parse target selector: %v", err)
+		}
+		out.Selector = labelSelector
+	} else if in.Selector != nil {
+		out.Selector = new(metav1.LabelSelector)
+		selector := make(map[string]string)
+		for key, val := range in.Selector {
+			selector[key] = val
+		}
+		out.Selector.MatchLabels = selector
+	} else {
+		out.Selector = nil
+	}
+
+	return nil
+}
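The hand-written status conversion above copies MatchLabels into the map-based Selector only when there are no match expressions, and always records the stringified selector in TargetSelector. A quick illustration calling the converter directly; nil is passed for the conversion scope since this function does not use it:

import (
	"fmt"

	appsv1beta1api "k8s.io/api/apps/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	scalescheme "k8s.io/client-go/scale/scheme"
	scaleappsv1beta1 "k8s.io/client-go/scale/scheme/appsv1beta1"
)

func example() {
	in := scalescheme.ScaleStatus{
		Replicas: 2,
		Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "web"}},
	}
	var out appsv1beta1api.ScaleStatus
	if err := scaleappsv1beta1.Convert_scheme_ScaleStatus_To_v1beta1_ScaleStatus(&in, &out, nil); err != nil {
		panic(err)
	}
	fmt.Println(out.Selector)       // map[app:web]
	fmt.Println(out.TargetSelector) // "app=web"
}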
diff --git a/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/doc.go b/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/doc.go
new file mode 100644
index 00000000..830619b4
--- /dev/null
+++ b/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:conversion-gen=k8s.io/kubernetes/vendor/k8s.io/client-go/scale/scheme
+// +k8s:conversion-gen-external-types=k8s.io/api/apps/v1beta1
+
+package appsv1beta1 // import "k8s.io/client-go/scale/scheme/appsv1beta1"
diff --git a/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/register.go b/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/register.go
new file mode 100644
index 00000000..f11fcbd0
--- /dev/null
+++ b/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/register.go
@@ -0,0 +1,45 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package appsv1beta1
+
+import (
+	appsapiv1beta1 "k8s.io/api/apps/v1beta1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = appsapiv1beta1.GroupName
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	localSchemeBuilder = &appsapiv1beta1.SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+func init() {
+	// We only register manually written functions here. The registration of the
+	// generated functions takes place in the generated files. The separation
+	// makes the code compile even when the generated files are missing.
+	localSchemeBuilder.Register()
+}
diff --git a/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/zz_generated.conversion.go
new file mode 100644
index 00000000..02a36051
--- /dev/null
+++ b/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/zz_generated.conversion.go
@@ -0,0 +1,133 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by conversion-gen. DO NOT EDIT.
+
+package appsv1beta1
+
+import (
+	v1beta1 "k8s.io/api/apps/v1beta1"
+	conversion "k8s.io/apimachinery/pkg/conversion"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	scheme "k8s.io/client-go/scale/scheme"
+)
+
+func init() {
+	localSchemeBuilder.Register(RegisterConversions)
+}
+
+// RegisterConversions adds conversion functions to the given scheme.
+// Public to allow building arbitrary schemes.
+func RegisterConversions(s *runtime.Scheme) error {
+	if err := s.AddGeneratedConversionFunc((*v1beta1.Scale)(nil), (*scheme.Scale)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_Scale_To_scheme_Scale(a.(*v1beta1.Scale), b.(*scheme.Scale), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*scheme.Scale)(nil), (*v1beta1.Scale)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_scheme_Scale_To_v1beta1_Scale(a.(*scheme.Scale), b.(*v1beta1.Scale), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1beta1.ScaleSpec)(nil), (*scheme.ScaleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_ScaleSpec_To_scheme_ScaleSpec(a.(*v1beta1.ScaleSpec), b.(*scheme.ScaleSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*scheme.ScaleSpec)(nil), (*v1beta1.ScaleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_scheme_ScaleSpec_To_v1beta1_ScaleSpec(a.(*scheme.ScaleSpec), b.(*v1beta1.ScaleSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*scheme.ScaleStatus)(nil), (*v1beta1.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_scheme_ScaleStatus_To_v1beta1_ScaleStatus(a.(*scheme.ScaleStatus), b.(*v1beta1.ScaleStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*v1beta1.ScaleStatus)(nil), (*scheme.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_ScaleStatus_To_scheme_ScaleStatus(a.(*v1beta1.ScaleStatus), b.(*scheme.ScaleStatus), scope)
+	}); err != nil {
+		return err
+	}
+	return nil
+}
+
+func autoConvert_v1beta1_Scale_To_scheme_Scale(in *v1beta1.Scale, out *scheme.Scale, s conversion.Scope) error {
+	out.ObjectMeta = in.ObjectMeta
+	if err := Convert_v1beta1_ScaleSpec_To_scheme_ScaleSpec(&in.Spec, &out.Spec, s); err != nil {
+		return err
+	}
+	if err := Convert_v1beta1_ScaleStatus_To_scheme_ScaleStatus(&in.Status, &out.Status, s); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Convert_v1beta1_Scale_To_scheme_Scale is an autogenerated conversion function.
+func Convert_v1beta1_Scale_To_scheme_Scale(in *v1beta1.Scale, out *scheme.Scale, s conversion.Scope) error {
+	return autoConvert_v1beta1_Scale_To_scheme_Scale(in, out, s)
+}
+
+func autoConvert_scheme_Scale_To_v1beta1_Scale(in *scheme.Scale, out *v1beta1.Scale, s conversion.Scope) error {
+	out.ObjectMeta = in.ObjectMeta
+	if err := Convert_scheme_ScaleSpec_To_v1beta1_ScaleSpec(&in.Spec, &out.Spec, s); err != nil {
+		return err
+	}
+	if err := Convert_scheme_ScaleStatus_To_v1beta1_ScaleStatus(&in.Status, &out.Status, s); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Convert_scheme_Scale_To_v1beta1_Scale is an autogenerated conversion function.
+func Convert_scheme_Scale_To_v1beta1_Scale(in *scheme.Scale, out *v1beta1.Scale, s conversion.Scope) error {
+	return autoConvert_scheme_Scale_To_v1beta1_Scale(in, out, s)
+}
+
+func autoConvert_v1beta1_ScaleSpec_To_scheme_ScaleSpec(in *v1beta1.ScaleSpec, out *scheme.ScaleSpec, s conversion.Scope) error {
+	out.Replicas = in.Replicas
+	return nil
+}
+
+// Convert_v1beta1_ScaleSpec_To_scheme_ScaleSpec is an autogenerated conversion function.
+func Convert_v1beta1_ScaleSpec_To_scheme_ScaleSpec(in *v1beta1.ScaleSpec, out *scheme.ScaleSpec, s conversion.Scope) error {
+	return autoConvert_v1beta1_ScaleSpec_To_scheme_ScaleSpec(in, out, s)
+}
+
+func autoConvert_scheme_ScaleSpec_To_v1beta1_ScaleSpec(in *scheme.ScaleSpec, out *v1beta1.ScaleSpec, s conversion.Scope) error {
+	out.Replicas = in.Replicas
+	return nil
+}
+
+// Convert_scheme_ScaleSpec_To_v1beta1_ScaleSpec is an autogenerated conversion function.
+func Convert_scheme_ScaleSpec_To_v1beta1_ScaleSpec(in *scheme.ScaleSpec, out *v1beta1.ScaleSpec, s conversion.Scope) error {
+	return autoConvert_scheme_ScaleSpec_To_v1beta1_ScaleSpec(in, out, s)
+}
+
+func autoConvert_v1beta1_ScaleStatus_To_scheme_ScaleStatus(in *v1beta1.ScaleStatus, out *scheme.ScaleStatus, s conversion.Scope) error {
+	out.Replicas = in.Replicas
+	// WARNING: in.Selector requires manual conversion: inconvertible types (map[string]string vs *k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector)
+	// WARNING: in.TargetSelector requires manual conversion: does not exist in peer-type
+	return nil
+}
+
+func autoConvert_scheme_ScaleStatus_To_v1beta1_ScaleStatus(in *scheme.ScaleStatus, out *v1beta1.ScaleStatus, s conversion.Scope) error {
+	out.Replicas = in.Replicas
+	// WARNING: in.Selector requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector vs map[string]string)
+	return nil
+}
diff --git a/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/conversion.go b/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/conversion.go
new file mode 100644
index 00000000..35d15c30
--- /dev/null
+++ b/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/conversion.go
@@ -0,0 +1,73 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package appsv1beta2
+
+import (
+	"fmt"
+
+	v1beta2 "k8s.io/api/apps/v1beta2"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/conversion"
+	scheme "k8s.io/client-go/scale/scheme"
+)
+
+func Convert_scheme_ScaleStatus_To_v1beta2_ScaleStatus(in *scheme.ScaleStatus, out *v1beta2.ScaleStatus, s conversion.Scope) error {
+	out.Replicas = in.Replicas
+	out.Selector = nil
+	out.TargetSelector = ""
+	if in.Selector != nil {
+		if in.Selector.MatchExpressions == nil || len(in.Selector.MatchExpressions) == 0 {
+			out.Selector = in.Selector.MatchLabels
+		}
+
+		selector, err := metav1.LabelSelectorAsSelector(in.Selector)
+		if err != nil {
+			return fmt.Errorf("invalid label selector: %v", err)
+		}
+		out.TargetSelector = selector.String()
+	}
+
+	return nil
+}
+
+func Convert_v1beta2_ScaleStatus_To_scheme_ScaleStatus(in *v1beta2.ScaleStatus, out *scheme.ScaleStatus, s conversion.Scope) error {
+	out.Replicas = in.Replicas
+
+	// Normally when 2 fields map to the same internal value we favor the old field, since
+	// old clients can't be expected to know about new fields but clients that know about the
+	// new field can be expected to know about the old field (though that's not quite true, due
+	// to kubectl apply). However, these fields are readonly, so any non-nil value should work.
+	if in.TargetSelector != "" {
+		labelSelector, err := metav1.ParseToLabelSelector(in.TargetSelector)
+		if err != nil {
+			out.Selector = nil
+			return fmt.Errorf("failed to parse target selector: %v", err)
+		}
+		out.Selector = labelSelector
+	} else if in.Selector != nil {
+		out.Selector = new(metav1.LabelSelector)
+		selector := make(map[string]string)
+		for key, val := range in.Selector {
+			selector[key] = val
+		}
+		out.Selector.MatchLabels = selector
+	} else {
+		out.Selector = nil
+	}
+
+	return nil
+}
diff --git a/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/doc.go b/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/doc.go
new file mode 100644
index 00000000..c21a56d5
--- /dev/null
+++ b/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:conversion-gen=k8s.io/kubernetes/vendor/k8s.io/client-go/scale/scheme
+// +k8s:conversion-gen-external-types=k8s.io/api/apps/v1beta2
+
+package appsv1beta2 // import "k8s.io/client-go/scale/scheme/appsv1beta2"
diff --git a/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/register.go b/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/register.go
new file mode 100644
index 00000000..5e8a5d20
--- /dev/null
+++ b/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/register.go
@@ -0,0 +1,45 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package appsv1beta2
+
+import (
+	appsapiv1beta2 "k8s.io/api/apps/v1beta2"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = appsapiv1beta2.GroupName
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta2"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	localSchemeBuilder = &appsapiv1beta2.SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+func init() {
+	// We only register manually written functions here. The registration of the
+	// generated functions takes place in the generated files. The separation
+	// makes the code compile even when the generated files are missing.
+	localSchemeBuilder.Register()
+}
diff --git a/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/zz_generated.conversion.go b/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/zz_generated.conversion.go
new file mode 100644
index 00000000..c31ad4b3
--- /dev/null
+++ b/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/zz_generated.conversion.go
@@ -0,0 +1,133 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by conversion-gen. DO NOT EDIT.
+
+package appsv1beta2
+
+import (
+	v1beta2 "k8s.io/api/apps/v1beta2"
+	conversion "k8s.io/apimachinery/pkg/conversion"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	scheme "k8s.io/client-go/scale/scheme"
+)
+
+func init() {
+	localSchemeBuilder.Register(RegisterConversions)
+}
+
+// RegisterConversions adds conversion functions to the given scheme.
+// Public to allow building arbitrary schemes.
+func RegisterConversions(s *runtime.Scheme) error {
+	if err := s.AddGeneratedConversionFunc((*v1beta2.Scale)(nil), (*scheme.Scale)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta2_Scale_To_scheme_Scale(a.(*v1beta2.Scale), b.(*scheme.Scale), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*scheme.Scale)(nil), (*v1beta2.Scale)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_scheme_Scale_To_v1beta2_Scale(a.(*scheme.Scale), b.(*v1beta2.Scale), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1beta2.ScaleSpec)(nil), (*scheme.ScaleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta2_ScaleSpec_To_scheme_ScaleSpec(a.(*v1beta2.ScaleSpec), b.(*scheme.ScaleSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*scheme.ScaleSpec)(nil), (*v1beta2.ScaleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_scheme_ScaleSpec_To_v1beta2_ScaleSpec(a.(*scheme.ScaleSpec), b.(*v1beta2.ScaleSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*scheme.ScaleStatus)(nil), (*v1beta2.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_scheme_ScaleStatus_To_v1beta2_ScaleStatus(a.(*scheme.ScaleStatus), b.(*v1beta2.ScaleStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*v1beta2.ScaleStatus)(nil), (*scheme.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta2_ScaleStatus_To_scheme_ScaleStatus(a.(*v1beta2.ScaleStatus), b.(*scheme.ScaleStatus), scope)
+	}); err != nil {
+		return err
+	}
+	return nil
+}
+
+func autoConvert_v1beta2_Scale_To_scheme_Scale(in *v1beta2.Scale, out *scheme.Scale, s conversion.Scope) error {
+	out.ObjectMeta = in.ObjectMeta
+	if err := Convert_v1beta2_ScaleSpec_To_scheme_ScaleSpec(&in.Spec, &out.Spec, s); err != nil {
+		return err
+	}
+	if err := Convert_v1beta2_ScaleStatus_To_scheme_ScaleStatus(&in.Status, &out.Status, s); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Convert_v1beta2_Scale_To_scheme_Scale is an autogenerated conversion function.
+func Convert_v1beta2_Scale_To_scheme_Scale(in *v1beta2.Scale, out *scheme.Scale, s conversion.Scope) error {
+	return autoConvert_v1beta2_Scale_To_scheme_Scale(in, out, s)
+}
+
+func autoConvert_scheme_Scale_To_v1beta2_Scale(in *scheme.Scale, out *v1beta2.Scale, s conversion.Scope) error {
+	out.ObjectMeta = in.ObjectMeta
+	if err := Convert_scheme_ScaleSpec_To_v1beta2_ScaleSpec(&in.Spec, &out.Spec, s); err != nil {
+		return err
+	}
+	if err := Convert_scheme_ScaleStatus_To_v1beta2_ScaleStatus(&in.Status, &out.Status, s); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Convert_scheme_Scale_To_v1beta2_Scale is an autogenerated conversion function.
+func Convert_scheme_Scale_To_v1beta2_Scale(in *scheme.Scale, out *v1beta2.Scale, s conversion.Scope) error {
+	return autoConvert_scheme_Scale_To_v1beta2_Scale(in, out, s)
+}
+
+func autoConvert_v1beta2_ScaleSpec_To_scheme_ScaleSpec(in *v1beta2.ScaleSpec, out *scheme.ScaleSpec, s conversion.Scope) error {
+	out.Replicas = in.Replicas
+	return nil
+}
+
+// Convert_v1beta2_ScaleSpec_To_scheme_ScaleSpec is an autogenerated conversion function.
+func Convert_v1beta2_ScaleSpec_To_scheme_ScaleSpec(in *v1beta2.ScaleSpec, out *scheme.ScaleSpec, s conversion.Scope) error {
+	return autoConvert_v1beta2_ScaleSpec_To_scheme_ScaleSpec(in, out, s)
+}
+
+func autoConvert_scheme_ScaleSpec_To_v1beta2_ScaleSpec(in *scheme.ScaleSpec, out *v1beta2.ScaleSpec, s conversion.Scope) error {
+	out.Replicas = in.Replicas
+	return nil
+}
+
+// Convert_scheme_ScaleSpec_To_v1beta2_ScaleSpec is an autogenerated conversion function.
+func Convert_scheme_ScaleSpec_To_v1beta2_ScaleSpec(in *scheme.ScaleSpec, out *v1beta2.ScaleSpec, s conversion.Scope) error {
+	return autoConvert_scheme_ScaleSpec_To_v1beta2_ScaleSpec(in, out, s)
+}
+
+func autoConvert_v1beta2_ScaleStatus_To_scheme_ScaleStatus(in *v1beta2.ScaleStatus, out *scheme.ScaleStatus, s conversion.Scope) error {
+	out.Replicas = in.Replicas
+	// WARNING: in.Selector requires manual conversion: inconvertible types (map[string]string vs *k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector)
+	// WARNING: in.TargetSelector requires manual conversion: does not exist in peer-type
+	return nil
+}
+
+func autoConvert_scheme_ScaleStatus_To_v1beta2_ScaleStatus(in *scheme.ScaleStatus, out *v1beta2.ScaleStatus, s conversion.Scope) error {
+	out.Replicas = in.Replicas
+	// WARNING: in.Selector requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector vs map[string]string)
+	return nil
+}
diff --git a/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/conversion.go b/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/conversion.go
new file mode 100644
index 00000000..36ef82b9
--- /dev/null
+++ b/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/conversion.go
@@ -0,0 +1,54 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package autoscalingv1
+
+import (
+	"fmt"
+
+	v1 "k8s.io/api/autoscaling/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/conversion"
+	scheme "k8s.io/client-go/scale/scheme"
+)
+
+func Convert_scheme_ScaleStatus_To_v1_ScaleStatus(in *scheme.ScaleStatus, out *v1.ScaleStatus, s conversion.Scope) error {
+	out.Replicas = in.Replicas
+	out.Selector = ""
+	if in.Selector != nil {
+		selector, err := metav1.LabelSelectorAsSelector(in.Selector)
+		if err != nil {
+			return fmt.Errorf("invalid label selector: %v", err)
+		}
+		out.Selector = selector.String()
+	}
+
+	return nil
+}
+
+func Convert_v1_ScaleStatus_To_scheme_ScaleStatus(in *v1.ScaleStatus, out *scheme.ScaleStatus, s conversion.Scope) error {
+	out.Replicas = in.Replicas
+	if in.Selector != "" {
+		labelSelector, err := metav1.ParseToLabelSelector(in.Selector)
+		if err != nil {
+			out.Selector = nil
+			return fmt.Errorf("failed to parse target selector: %v", err)
+		}
+		out.Selector = labelSelector
+	}
+
+	return nil
+}
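For autoscaling/v1 the external selector is a plain string, so the reverse conversion parses it back into a metav1.LabelSelector. A brief sketch of that direction, again with a nil scope because the function ignores it:

import (
	"fmt"

	autoscalingv1api "k8s.io/api/autoscaling/v1"
	scalescheme "k8s.io/client-go/scale/scheme"
	scaleautoscalingv1 "k8s.io/client-go/scale/scheme/autoscalingv1"
)

func example() {
	in := autoscalingv1api.ScaleStatus{Replicas: 3, Selector: "app=web,tier=frontend"}
	var out scalescheme.ScaleStatus
	if err := scaleautoscalingv1.Convert_v1_ScaleStatus_To_scheme_ScaleStatus(&in, &out, nil); err != nil {
		panic(err)
	}
	fmt.Println(out.Selector.MatchLabels) // map[app:web tier:frontend]
}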
diff --git a/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/doc.go b/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/doc.go
new file mode 100644
index 00000000..03684dd9
--- /dev/null
+++ b/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:conversion-gen=k8s.io/kubernetes/vendor/k8s.io/client-go/scale/scheme
+// +k8s:conversion-gen-external-types=k8s.io/api/autoscaling/v1
+
+package autoscalingv1 // import "k8s.io/client-go/scale/scheme/autoscalingv1"
diff --git a/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/register.go b/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/register.go
new file mode 100644
index 00000000..4339376c
--- /dev/null
+++ b/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/register.go
@@ -0,0 +1,45 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package autoscalingv1
+
+import (
+	autoscalingapiv1 "k8s.io/api/autoscaling/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = autoscalingapiv1.GroupName
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	localSchemeBuilder = &autoscalingapiv1.SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+func init() {
+	// We only register manually written functions here. The registration of the
+	// generated functions takes place in the generated files. The separation
+	// makes the code compile even when the generated files are missing.
+	localSchemeBuilder.Register()
+}
diff --git a/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/zz_generated.conversion.go b/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/zz_generated.conversion.go
new file mode 100644
index 00000000..9bc48695
--- /dev/null
+++ b/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/zz_generated.conversion.go
@@ -0,0 +1,132 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by conversion-gen. DO NOT EDIT.
+
+package autoscalingv1
+
+import (
+	v1 "k8s.io/api/autoscaling/v1"
+	conversion "k8s.io/apimachinery/pkg/conversion"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	scheme "k8s.io/client-go/scale/scheme"
+)
+
+func init() {
+	localSchemeBuilder.Register(RegisterConversions)
+}
+
+// RegisterConversions adds conversion functions to the given scheme.
+// Public to allow building arbitrary schemes.
+func RegisterConversions(s *runtime.Scheme) error {
+	if err := s.AddGeneratedConversionFunc((*v1.Scale)(nil), (*scheme.Scale)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_Scale_To_scheme_Scale(a.(*v1.Scale), b.(*scheme.Scale), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*scheme.Scale)(nil), (*v1.Scale)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_scheme_Scale_To_v1_Scale(a.(*scheme.Scale), b.(*v1.Scale), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1.ScaleSpec)(nil), (*scheme.ScaleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_ScaleSpec_To_scheme_ScaleSpec(a.(*v1.ScaleSpec), b.(*scheme.ScaleSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*scheme.ScaleSpec)(nil), (*v1.ScaleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_scheme_ScaleSpec_To_v1_ScaleSpec(a.(*scheme.ScaleSpec), b.(*v1.ScaleSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*scheme.ScaleStatus)(nil), (*v1.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_scheme_ScaleStatus_To_v1_ScaleStatus(a.(*scheme.ScaleStatus), b.(*v1.ScaleStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*v1.ScaleStatus)(nil), (*scheme.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1_ScaleStatus_To_scheme_ScaleStatus(a.(*v1.ScaleStatus), b.(*scheme.ScaleStatus), scope)
+	}); err != nil {
+		return err
+	}
+	return nil
+}
+
+func autoConvert_v1_Scale_To_scheme_Scale(in *v1.Scale, out *scheme.Scale, s conversion.Scope) error {
+	out.ObjectMeta = in.ObjectMeta
+	if err := Convert_v1_ScaleSpec_To_scheme_ScaleSpec(&in.Spec, &out.Spec, s); err != nil {
+		return err
+	}
+	if err := Convert_v1_ScaleStatus_To_scheme_ScaleStatus(&in.Status, &out.Status, s); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Convert_v1_Scale_To_scheme_Scale is an autogenerated conversion function.
+func Convert_v1_Scale_To_scheme_Scale(in *v1.Scale, out *scheme.Scale, s conversion.Scope) error {
+	return autoConvert_v1_Scale_To_scheme_Scale(in, out, s)
+}
+
+func autoConvert_scheme_Scale_To_v1_Scale(in *scheme.Scale, out *v1.Scale, s conversion.Scope) error {
+	out.ObjectMeta = in.ObjectMeta
+	if err := Convert_scheme_ScaleSpec_To_v1_ScaleSpec(&in.Spec, &out.Spec, s); err != nil {
+		return err
+	}
+	if err := Convert_scheme_ScaleStatus_To_v1_ScaleStatus(&in.Status, &out.Status, s); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Convert_scheme_Scale_To_v1_Scale is an autogenerated conversion function.
+func Convert_scheme_Scale_To_v1_Scale(in *scheme.Scale, out *v1.Scale, s conversion.Scope) error {
+	return autoConvert_scheme_Scale_To_v1_Scale(in, out, s)
+}
+
+func autoConvert_v1_ScaleSpec_To_scheme_ScaleSpec(in *v1.ScaleSpec, out *scheme.ScaleSpec, s conversion.Scope) error {
+	out.Replicas = in.Replicas
+	return nil
+}
+
+// Convert_v1_ScaleSpec_To_scheme_ScaleSpec is an autogenerated conversion function.
+func Convert_v1_ScaleSpec_To_scheme_ScaleSpec(in *v1.ScaleSpec, out *scheme.ScaleSpec, s conversion.Scope) error {
+	return autoConvert_v1_ScaleSpec_To_scheme_ScaleSpec(in, out, s)
+}
+
+func autoConvert_scheme_ScaleSpec_To_v1_ScaleSpec(in *scheme.ScaleSpec, out *v1.ScaleSpec, s conversion.Scope) error {
+	out.Replicas = in.Replicas
+	return nil
+}
+
+// Convert_scheme_ScaleSpec_To_v1_ScaleSpec is an autogenerated conversion function.
+func Convert_scheme_ScaleSpec_To_v1_ScaleSpec(in *scheme.ScaleSpec, out *v1.ScaleSpec, s conversion.Scope) error {
+	return autoConvert_scheme_ScaleSpec_To_v1_ScaleSpec(in, out, s)
+}
+
+func autoConvert_v1_ScaleStatus_To_scheme_ScaleStatus(in *v1.ScaleStatus, out *scheme.ScaleStatus, s conversion.Scope) error {
+	out.Replicas = in.Replicas
+	// WARNING: in.Selector requires manual conversion: inconvertible types (string vs *k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector)
+	return nil
+}
+
+func autoConvert_scheme_ScaleStatus_To_v1_ScaleStatus(in *scheme.ScaleStatus, out *v1.ScaleStatus, s conversion.Scope) error {
+	out.Replicas = in.Replicas
+	// WARNING: in.Selector requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector vs string)
+	return nil
+}
diff --git a/vendor/k8s.io/client-go/scale/scheme/doc.go b/vendor/k8s.io/client-go/scale/scheme/doc.go
new file mode 100644
index 00000000..0203d6d5
--- /dev/null
+++ b/vendor/k8s.io/client-go/scale/scheme/doc.go
@@ -0,0 +1,22 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+
+// Package scheme contains a runtime.Scheme to be used for serializing
+// and deserializing different versions of Scale, and for converting
+// in between them.
+package scheme
diff --git a/vendor/k8s.io/client-go/scale/scheme/extensionsint/doc.go b/vendor/k8s.io/client-go/scale/scheme/extensionsint/doc.go
new file mode 100644
index 00000000..9aaac608
--- /dev/null
+++ b/vendor/k8s.io/client-go/scale/scheme/extensionsint/doc.go
@@ -0,0 +1,22 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package extensionsint contains the necessary scaffolding of the
+// internal version of extensions as required by conversion logic.
+// It doesn't have any of its own types -- it's just necessary to
+// get the expected behavior out of runtime.Scheme.ConvertToVersion
+// and associated methods.
+package extensionsint
diff --git a/vendor/k8s.io/client-go/scale/scheme/extensionsint/register.go b/vendor/k8s.io/client-go/scale/scheme/extensionsint/register.go
new file mode 100644
index 00000000..570a8a54
--- /dev/null
+++ b/vendor/k8s.io/client-go/scale/scheme/extensionsint/register.go
@@ -0,0 +1,55 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package extensionsint
+
+import (
+	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	scalescheme "k8s.io/client-go/scale/scheme"
+)
+
+// GroupName is the group name used in this package
+const GroupName = extensionsv1beta1.GroupName
+
+// SchemeGroupVersion is the group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
+
+// Kind takes an unqualified kind and returns a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+	return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	// SchemeBuilder points to a list of functions added to Scheme.
+	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+	// AddToScheme applies all the stored functions to the scheme.
+	AddToScheme = SchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&scalescheme.Scale{},
+	)
+	return nil
+}
diff --git a/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/conversion.go b/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/conversion.go
new file mode 100644
index 00000000..821eb33d
--- /dev/null
+++ b/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/conversion.go
@@ -0,0 +1,73 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package extensionsv1beta1
+
+import (
+	"fmt"
+
+	v1beta1 "k8s.io/api/extensions/v1beta1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/conversion"
+	scheme "k8s.io/client-go/scale/scheme"
+)
+
+func Convert_scheme_ScaleStatus_To_v1beta1_ScaleStatus(in *scheme.ScaleStatus, out *v1beta1.ScaleStatus, s conversion.Scope) error {
+	out.Replicas = in.Replicas
+	out.Selector = nil
+	out.TargetSelector = ""
+	if in.Selector != nil {
+		if in.Selector.MatchExpressions == nil || len(in.Selector.MatchExpressions) == 0 {
+			out.Selector = in.Selector.MatchLabels
+		}
+
+		selector, err := metav1.LabelSelectorAsSelector(in.Selector)
+		if err != nil {
+			return fmt.Errorf("invalid label selector: %v", err)
+		}
+		out.TargetSelector = selector.String()
+	}
+
+	return nil
+}
+
+func Convert_v1beta1_ScaleStatus_To_scheme_ScaleStatus(in *v1beta1.ScaleStatus, out *scheme.ScaleStatus, s conversion.Scope) error {
+	out.Replicas = in.Replicas
+
+	// Normally when 2 fields map to the same internal value we favor the old field, since
+	// old clients can't be expected to know about new fields but clients that know about the
+	// new field can be expected to know about the old field (though that's not quite true, due
+	// to kubectl apply). However, these fields are readonly, so any non-nil value should work.
+	if in.TargetSelector != "" {
+		labelSelector, err := metav1.ParseToLabelSelector(in.TargetSelector)
+		if err != nil {
+			out.Selector = nil
+			return fmt.Errorf("failed to parse target selector: %v", err)
+		}
+		out.Selector = labelSelector
+	} else if in.Selector != nil {
+		out.Selector = new(metav1.LabelSelector)
+		selector := make(map[string]string)
+		for key, val := range in.Selector {
+			selector[key] = val
+		}
+		out.Selector.MatchLabels = selector
+	} else {
+		out.Selector = nil
+	}
+
+	return nil
+}
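The two hand-written conversions above bridge the selector representations used by the different Scale versions: extensions/v1beta1 carries a map-based Selector plus a serialized TargetSelector string, while the internal scheme type holds a *metav1.LabelSelector. A minimal, self-contained sketch of the same round trip using the apimachinery helpers these functions rely on; the selector string is purely an illustrative value.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// String form, as found in extensions/v1beta1 ScaleStatus.TargetSelector.
	target := "app=web,tier in (frontend)"

	// Parse into the structured form used by the internal scheme version,
	// mirroring Convert_v1beta1_ScaleStatus_To_scheme_ScaleStatus.
	sel, err := metav1.ParseToLabelSelector(target)
	if err != nil {
		panic(err)
	}

	// And back to a string, mirroring Convert_scheme_ScaleStatus_To_v1beta1_ScaleStatus.
	parsed, err := metav1.LabelSelectorAsSelector(sel)
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed.String())
}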
diff --git a/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/doc.go b/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/doc.go
new file mode 100644
index 00000000..1e719884
--- /dev/null
+++ b/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:conversion-gen=k8s.io/kubernetes/vendor/k8s.io/client-go/scale/scheme
+// +k8s:conversion-gen-external-types=k8s.io/api/extensions/v1beta1
+
+package extensionsv1beta1 // import "k8s.io/client-go/scale/scheme/extensionsv1beta1"
diff --git a/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/register.go b/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/register.go
new file mode 100644
index 00000000..248a0071
--- /dev/null
+++ b/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/register.go
@@ -0,0 +1,45 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package extensionsv1beta1
+
+import (
+	extensionsapiv1beta1 "k8s.io/api/extensions/v1beta1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = extensionsapiv1beta1.GroupName
+
+// SchemeGroupVersion is the group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	localSchemeBuilder = &extensionsapiv1beta1.SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+func init() {
+	// We only register manually written functions here. The registration of the
+	// generated functions takes place in the generated files. The separation
+	// makes the code compile even when the generated files are missing.
+	localSchemeBuilder.Register()
+}
diff --git a/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/zz_generated.conversion.go
new file mode 100644
index 00000000..5fd69a5a
--- /dev/null
+++ b/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/zz_generated.conversion.go
@@ -0,0 +1,133 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by conversion-gen. DO NOT EDIT.
+
+package extensionsv1beta1
+
+import (
+	v1beta1 "k8s.io/api/extensions/v1beta1"
+	conversion "k8s.io/apimachinery/pkg/conversion"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	scheme "k8s.io/client-go/scale/scheme"
+)
+
+func init() {
+	localSchemeBuilder.Register(RegisterConversions)
+}
+
+// RegisterConversions adds conversion functions to the given scheme.
+// Public to allow building arbitrary schemes.
+func RegisterConversions(s *runtime.Scheme) error {
+	if err := s.AddGeneratedConversionFunc((*v1beta1.Scale)(nil), (*scheme.Scale)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_Scale_To_scheme_Scale(a.(*v1beta1.Scale), b.(*scheme.Scale), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*scheme.Scale)(nil), (*v1beta1.Scale)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_scheme_Scale_To_v1beta1_Scale(a.(*scheme.Scale), b.(*v1beta1.Scale), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*v1beta1.ScaleSpec)(nil), (*scheme.ScaleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_ScaleSpec_To_scheme_ScaleSpec(a.(*v1beta1.ScaleSpec), b.(*scheme.ScaleSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*scheme.ScaleSpec)(nil), (*v1beta1.ScaleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_scheme_ScaleSpec_To_v1beta1_ScaleSpec(a.(*scheme.ScaleSpec), b.(*v1beta1.ScaleSpec), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*scheme.ScaleStatus)(nil), (*v1beta1.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_scheme_ScaleStatus_To_v1beta1_ScaleStatus(a.(*scheme.ScaleStatus), b.(*v1beta1.ScaleStatus), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*v1beta1.ScaleStatus)(nil), (*scheme.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_ScaleStatus_To_scheme_ScaleStatus(a.(*v1beta1.ScaleStatus), b.(*scheme.ScaleStatus), scope)
+	}); err != nil {
+		return err
+	}
+	return nil
+}
+
+func autoConvert_v1beta1_Scale_To_scheme_Scale(in *v1beta1.Scale, out *scheme.Scale, s conversion.Scope) error {
+	out.ObjectMeta = in.ObjectMeta
+	if err := Convert_v1beta1_ScaleSpec_To_scheme_ScaleSpec(&in.Spec, &out.Spec, s); err != nil {
+		return err
+	}
+	if err := Convert_v1beta1_ScaleStatus_To_scheme_ScaleStatus(&in.Status, &out.Status, s); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Convert_v1beta1_Scale_To_scheme_Scale is an autogenerated conversion function.
+func Convert_v1beta1_Scale_To_scheme_Scale(in *v1beta1.Scale, out *scheme.Scale, s conversion.Scope) error {
+	return autoConvert_v1beta1_Scale_To_scheme_Scale(in, out, s)
+}
+
+func autoConvert_scheme_Scale_To_v1beta1_Scale(in *scheme.Scale, out *v1beta1.Scale, s conversion.Scope) error {
+	out.ObjectMeta = in.ObjectMeta
+	if err := Convert_scheme_ScaleSpec_To_v1beta1_ScaleSpec(&in.Spec, &out.Spec, s); err != nil {
+		return err
+	}
+	if err := Convert_scheme_ScaleStatus_To_v1beta1_ScaleStatus(&in.Status, &out.Status, s); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Convert_scheme_Scale_To_v1beta1_Scale is an autogenerated conversion function.
+func Convert_scheme_Scale_To_v1beta1_Scale(in *scheme.Scale, out *v1beta1.Scale, s conversion.Scope) error {
+	return autoConvert_scheme_Scale_To_v1beta1_Scale(in, out, s)
+}
+
+func autoConvert_v1beta1_ScaleSpec_To_scheme_ScaleSpec(in *v1beta1.ScaleSpec, out *scheme.ScaleSpec, s conversion.Scope) error {
+	out.Replicas = in.Replicas
+	return nil
+}
+
+// Convert_v1beta1_ScaleSpec_To_scheme_ScaleSpec is an autogenerated conversion function.
+func Convert_v1beta1_ScaleSpec_To_scheme_ScaleSpec(in *v1beta1.ScaleSpec, out *scheme.ScaleSpec, s conversion.Scope) error {
+	return autoConvert_v1beta1_ScaleSpec_To_scheme_ScaleSpec(in, out, s)
+}
+
+func autoConvert_scheme_ScaleSpec_To_v1beta1_ScaleSpec(in *scheme.ScaleSpec, out *v1beta1.ScaleSpec, s conversion.Scope) error {
+	out.Replicas = in.Replicas
+	return nil
+}
+
+// Convert_scheme_ScaleSpec_To_v1beta1_ScaleSpec is an autogenerated conversion function.
+func Convert_scheme_ScaleSpec_To_v1beta1_ScaleSpec(in *scheme.ScaleSpec, out *v1beta1.ScaleSpec, s conversion.Scope) error {
+	return autoConvert_scheme_ScaleSpec_To_v1beta1_ScaleSpec(in, out, s)
+}
+
+func autoConvert_v1beta1_ScaleStatus_To_scheme_ScaleStatus(in *v1beta1.ScaleStatus, out *scheme.ScaleStatus, s conversion.Scope) error {
+	out.Replicas = in.Replicas
+	// WARNING: in.Selector requires manual conversion: inconvertible types (map[string]string vs *k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector)
+	// WARNING: in.TargetSelector requires manual conversion: does not exist in peer-type
+	return nil
+}
+
+func autoConvert_scheme_ScaleStatus_To_v1beta1_ScaleStatus(in *scheme.ScaleStatus, out *v1beta1.ScaleStatus, s conversion.Scope) error {
+	out.Replicas = in.Replicas
+	// WARNING: in.Selector requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector vs map[string]string)
+	return nil
+}
diff --git a/vendor/k8s.io/client-go/scale/scheme/register.go b/vendor/k8s.io/client-go/scale/scheme/register.go
new file mode 100644
index 00000000..4339e617
--- /dev/null
+++ b/vendor/k8s.io/client-go/scale/scheme/register.go
@@ -0,0 +1,54 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package scheme
+
+import (
+	autoscalingv1 "k8s.io/api/autoscaling/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = autoscalingv1.GroupName
+
+// SchemeGroupVersion is the group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
+
+// Kind takes an unqualified kind and returns a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+	return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	// SchemeBuilder points to a list of functions added to Scheme.
+	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+	// AddToScheme applies all the stored functions to the scheme.
+	AddToScheme = SchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&Scale{},
+	)
+	return nil
+}
diff --git a/vendor/k8s.io/client-go/scale/scheme/types.go b/vendor/k8s.io/client-go/scale/scheme/types.go
new file mode 100644
index 00000000..5c5d0a6f
--- /dev/null
+++ b/vendor/k8s.io/client-go/scale/scheme/types.go
@@ -0,0 +1,60 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package scheme
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// This file contains our own "internal" version of scale that we use for conversions,
+// since we can't use the main Kubernetes internal versions.
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Scale represents a scaling request for a resource.
+type Scale struct {
+	metav1.TypeMeta
+	// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
+	// +optional
+	metav1.ObjectMeta
+
+	// defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.
+	// +optional
+	Spec ScaleSpec
+
+	// current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.
+	// +optional
+	Status ScaleStatus
+}
+
+// ScaleSpec describes the attributes of a scale subresource.
+type ScaleSpec struct {
+	// desired number of instances for the scaled object.
+	// +optional
+	Replicas int32
+}
+
+// ScaleStatus represents the current status of a scale subresource.
+type ScaleStatus struct {
+	// actual number of observed instances of the scaled object.
+	Replicas int32
+
+	// label query over pods that should match the replicas count.
+	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+	// +optional
+	Selector *metav1.LabelSelector
+}
diff --git a/vendor/k8s.io/client-go/scale/scheme/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/scale/scheme/zz_generated.deepcopy.go
new file mode 100644
index 00000000..3db70815
--- /dev/null
+++ b/vendor/k8s.io/client-go/scale/scheme/zz_generated.deepcopy.go
@@ -0,0 +1,91 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package scheme
+
+import (
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Scale) DeepCopyInto(out *Scale) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	out.Spec = in.Spec
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scale.
+func (in *Scale) DeepCopy() *Scale {
+	if in == nil {
+		return nil
+	}
+	out := new(Scale)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Scale) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ScaleSpec) DeepCopyInto(out *ScaleSpec) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleSpec.
+func (in *ScaleSpec) DeepCopy() *ScaleSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ScaleSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ScaleStatus) DeepCopyInto(out *ScaleStatus) {
+	*out = *in
+	if in.Selector != nil {
+		in, out := &in.Selector, &out.Selector
+		*out = new(v1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleStatus.
+func (in *ScaleStatus) DeepCopy() *ScaleStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ScaleStatus)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/k8s.io/client-go/scale/util.go b/vendor/k8s.io/client-go/scale/util.go
new file mode 100644
index 00000000..2f43a7a7
--- /dev/null
+++ b/vendor/k8s.io/client-go/scale/util.go
@@ -0,0 +1,197 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package scale
+
+import (
+	"fmt"
+	"strings"
+	"sync"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/client-go/discovery"
+	scalescheme "k8s.io/client-go/scale/scheme"
+	scaleappsint "k8s.io/client-go/scale/scheme/appsint"
+	scaleappsv1beta1 "k8s.io/client-go/scale/scheme/appsv1beta1"
+	scaleappsv1beta2 "k8s.io/client-go/scale/scheme/appsv1beta2"
+	scaleautoscaling "k8s.io/client-go/scale/scheme/autoscalingv1"
+	scaleextint "k8s.io/client-go/scale/scheme/extensionsint"
+	scaleext "k8s.io/client-go/scale/scheme/extensionsv1beta1"
+)
+
+// PreferredResourceMapper determines the preferred version of a resource to scale
+type PreferredResourceMapper interface {
+	// ResourceFor takes a partial resource and returns the preferred resource.
+	ResourceFor(resource schema.GroupVersionResource) (preferredResource schema.GroupVersionResource, err error)
+}
+
+// Ensure a RESTMapper satisfies the PreferredResourceMapper interface
+var _ PreferredResourceMapper = meta.RESTMapper(nil)
+
+// ScaleKindResolver knows about the relationship between
+// resources and the GroupVersionKind of their scale subresources.
+type ScaleKindResolver interface {
+	// ScaleForResource returns the GroupVersionKind of the
+	// scale subresource for the given GroupVersionResource.
+	ScaleForResource(resource schema.GroupVersionResource) (scaleVersion schema.GroupVersionKind, err error)
+}
+
+// discoveryScaleResolver is a ScaleKindResolver that uses
+// a DiscoveryInterface to associate resources with their
+// scale-kinds
+type discoveryScaleResolver struct {
+	discoveryClient discovery.ServerResourcesInterface
+}
+
+func (r *discoveryScaleResolver) ScaleForResource(inputRes schema.GroupVersionResource) (scaleVersion schema.GroupVersionKind, err error) {
+	groupVerResources, err := r.discoveryClient.ServerResourcesForGroupVersion(inputRes.GroupVersion().String())
+	if err != nil {
+		return schema.GroupVersionKind{}, fmt.Errorf("unable to fetch discovery information for %s: %v", inputRes.String(), err)
+	}
+
+	for _, resource := range groupVerResources.APIResources {
+		resourceParts := strings.SplitN(resource.Name, "/", 2)
+		if len(resourceParts) != 2 || resourceParts[0] != inputRes.Resource || resourceParts[1] != "scale" {
+			// skip non-scale resources, or scales for resources that we're not looking for
+			continue
+		}
+
+		scaleGV := inputRes.GroupVersion()
+		if resource.Group != "" && resource.Version != "" {
+			scaleGV = schema.GroupVersion{
+				Group:   resource.Group,
+				Version: resource.Version,
+			}
+		}
+
+		return scaleGV.WithKind(resource.Kind), nil
+	}
+
+	return schema.GroupVersionKind{}, fmt.Errorf("could not find scale subresource for %s in discovery information", inputRes.String())
+}
+
+// cachedScaleKindResolver is a ScaleKindResolver that caches results
+// from another ScaleKindResolver, re-fetching on cache misses.
+type cachedScaleKindResolver struct {
+	base ScaleKindResolver
+
+	cache map[schema.GroupVersionResource]schema.GroupVersionKind
+	mu    sync.RWMutex
+}
+
+func (r *cachedScaleKindResolver) ScaleForResource(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) {
+	r.mu.RLock()
+	gvk, isCached := r.cache[resource]
+	r.mu.RUnlock()
+	if isCached {
+		return gvk, nil
+	}
+
+	// we could have multiple fetches of the same resources, but that's probably
+	// better than limiting to only one reader at once (mu.Mutex),
+	// or blocking checks for other resources while we fetch
+	// (mu.Lock before fetch).
+	gvk, err := r.base.ScaleForResource(resource)
+	if err != nil {
+		return schema.GroupVersionKind{}, err
+	}
+
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	r.cache[resource] = gvk
+
+	return gvk, nil
+}
+
+// NewDiscoveryScaleKindResolver creates a new ScaleKindResolver which uses information from the given
+// discovery client to resolve the correct Scale GroupVersionKind for different resources.
+func NewDiscoveryScaleKindResolver(client discovery.ServerResourcesInterface) ScaleKindResolver {
+	base := &discoveryScaleResolver{
+		discoveryClient: client,
+	}
+
+	return &cachedScaleKindResolver{
+		base:  base,
+		cache: make(map[schema.GroupVersionResource]schema.GroupVersionKind),
+	}
+}
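A hedged sketch of how this resolver is typically wired up against a live cluster. The kubeconfig path and the apps/v1 "deployments" resource are illustrative assumptions; BuildConfigFromFlags and NewDiscoveryClientForConfig are the usual client-go entry points for obtaining a ServerResourcesInterface.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/scale"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Illustrative kubeconfig path; any *rest.Config would do.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	dc, err := discovery.NewDiscoveryClientForConfig(cfg)
	if err != nil {
		panic(err)
	}

	resolver := scale.NewDiscoveryScaleKindResolver(dc)
	gvk, err := resolver.ScaleForResource(schema.GroupVersionResource{
		Group: "apps", Version: "v1", Resource: "deployments",
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(gvk) // typically autoscaling/v1, Kind=Scale
}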
+
+// ScaleConverter knows how to convert between external scale versions.
+type ScaleConverter struct {
+	scheme            *runtime.Scheme
+	codecs            serializer.CodecFactory
+	internalVersioner runtime.GroupVersioner
+}
+
+// NewScaleConverter creates a new ScaleConverter for converting between
+// Scales in autoscaling/v1 and extensions/v1beta1.
+func NewScaleConverter() *ScaleConverter {
+	scheme := runtime.NewScheme()
+	utilruntime.Must(scaleautoscaling.AddToScheme(scheme))
+	utilruntime.Must(scalescheme.AddToScheme(scheme))
+	utilruntime.Must(scaleext.AddToScheme(scheme))
+	utilruntime.Must(scaleextint.AddToScheme(scheme))
+	utilruntime.Must(scaleappsint.AddToScheme(scheme))
+	utilruntime.Must(scaleappsv1beta1.AddToScheme(scheme))
+	utilruntime.Must(scaleappsv1beta2.AddToScheme(scheme))
+
+	return &ScaleConverter{
+		scheme: scheme,
+		codecs: serializer.NewCodecFactory(scheme),
+		internalVersioner: runtime.NewMultiGroupVersioner(
+			scalescheme.SchemeGroupVersion,
+			schema.GroupKind{Group: scaleext.GroupName, Kind: "Scale"},
+			schema.GroupKind{Group: scaleautoscaling.GroupName, Kind: "Scale"},
+			schema.GroupKind{Group: scaleappsv1beta1.GroupName, Kind: "Scale"},
+			schema.GroupKind{Group: scaleappsv1beta2.GroupName, Kind: "Scale"},
+		),
+	}
+}
+
+// Scheme returns the scheme used by this scale converter.
+func (c *ScaleConverter) Scheme() *runtime.Scheme {
+	return c.scheme
+}
+
+func (c *ScaleConverter) Codecs() serializer.CodecFactory {
+	return c.codecs
+}
+
+func (c *ScaleConverter) ScaleVersions() []schema.GroupVersion {
+	return []schema.GroupVersion{
+		scaleautoscaling.SchemeGroupVersion,
+		scalescheme.SchemeGroupVersion,
+		scaleext.SchemeGroupVersion,
+		scaleextint.SchemeGroupVersion,
+		scaleappsint.SchemeGroupVersion,
+		scaleappsv1beta1.SchemeGroupVersion,
+		scaleappsv1beta2.SchemeGroupVersion,
+	}
+}
+
+// ConvertToVersion converts the given *external* input object to the given output *external* output group-version.
+func (c *ScaleConverter) ConvertToVersion(in runtime.Object, outVersion schema.GroupVersion) (runtime.Object, error) {
+	scaleInt, err := c.scheme.ConvertToVersion(in, c.internalVersioner)
+	if err != nil {
+		return nil, err
+	}
+
+	return c.scheme.ConvertToVersion(scaleInt, outVersion)
+}
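Putting the converter together, a small usage sketch: an autoscaling/v1 Scale is routed through the internal hub version registered above and comes back out as an extensions/v1beta1 Scale. The target group-version is just one of the registered examples, and the replica count is arbitrary.

package main

import (
	"fmt"

	autoscalingv1 "k8s.io/api/autoscaling/v1"
	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
	"k8s.io/client-go/scale"
)

func main() {
	converter := scale.NewScaleConverter()

	in := &autoscalingv1.Scale{
		Spec: autoscalingv1.ScaleSpec{Replicas: 3},
	}

	// external (autoscaling/v1) -> internal hub -> external (extensions/v1beta1)
	out, err := converter.ConvertToVersion(in, extensionsv1beta1.SchemeGroupVersion)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%T replicas=%d\n", out, out.(*extensionsv1beta1.Scale).Spec.Replicas)
}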
diff --git a/vendor/k8s.io/client-go/third_party/forked/golang/template/exec.go b/vendor/k8s.io/client-go/third_party/forked/golang/template/exec.go
new file mode 100644
index 00000000..739fd350
--- /dev/null
+++ b/vendor/k8s.io/client-go/third_party/forked/golang/template/exec.go
@@ -0,0 +1,94 @@
+// This package is copied from the Go library text/template.
+// The original private functions indirect and printableValue
+// are exported as public functions.
+package template
+
+import (
+	"fmt"
+	"reflect"
+)
+
+var Indirect = indirect
+var PrintableValue = printableValue
+
+var (
+	errorType       = reflect.TypeOf((*error)(nil)).Elem()
+	fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
+)
+
+// indirect returns the item at the end of indirection, and a bool to indicate if it's nil.
+// We indirect through pointers and empty interfaces (only) because
+// non-empty interfaces have methods we might need.
+func indirect(v reflect.Value) (rv reflect.Value, isNil bool) {
+	for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() {
+		if v.IsNil() {
+			return v, true
+		}
+		if v.Kind() == reflect.Interface && v.NumMethod() > 0 {
+			break
+		}
+	}
+	return v, false
+}
+
+// printableValue returns the, possibly indirected, interface value inside v that
+// is best for a call to a formatted printer.
+func printableValue(v reflect.Value) (interface{}, bool) {
+	if v.Kind() == reflect.Ptr {
+		v, _ = indirect(v) // fmt.Fprint handles nil.
+	}
+	if !v.IsValid() {
+		return "<no value>", true
+	}
+
+	if !v.Type().Implements(errorType) && !v.Type().Implements(fmtStringerType) {
+		if v.CanAddr() && (reflect.PtrTo(v.Type()).Implements(errorType) || reflect.PtrTo(v.Type()).Implements(fmtStringerType)) {
+			v = v.Addr()
+		} else {
+			switch v.Kind() {
+			case reflect.Chan, reflect.Func:
+				return nil, false
+			}
+		}
+	}
+	return v.Interface(), true
+}
+
+// canBeNil reports whether an untyped nil can be assigned to the type. See reflect.Zero.
+func canBeNil(typ reflect.Type) bool {
+	switch typ.Kind() {
+	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+		return true
+	}
+	return false
+}
+
+// isTrue reports whether the value is 'true', in the sense of not the zero of its type,
+// and whether the value has a meaningful truth value.
+func isTrue(val reflect.Value) (truth, ok bool) {
+	if !val.IsValid() {
+		// Something like var x interface{}, never set. It's a form of nil.
+		return false, true
+	}
+	switch val.Kind() {
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+		truth = val.Len() > 0
+	case reflect.Bool:
+		truth = val.Bool()
+	case reflect.Complex64, reflect.Complex128:
+		truth = val.Complex() != 0
+	case reflect.Chan, reflect.Func, reflect.Ptr, reflect.Interface:
+		truth = !val.IsNil()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		truth = val.Int() != 0
+	case reflect.Float32, reflect.Float64:
+		truth = val.Float() != 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		truth = val.Uint() != 0
+	case reflect.Struct:
+		truth = true // Struct values are always true.
+	default:
+		return
+	}
+	return truth, true
+}
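Because indirect and printableValue are re-exported as Indirect and PrintableValue, code outside this package (for instance client-go's jsonpath support) can reuse the unwrapping logic. A tiny illustration of Indirect with throwaway values:

package main

import (
	"fmt"
	"reflect"

	"k8s.io/client-go/third_party/forked/golang/template"
)

func main() {
	s := "hello"
	p := &s
	var i interface{} = &p // **string stored in an empty interface

	v, isNil := template.Indirect(reflect.ValueOf(i))
	fmt.Println(v.Kind(), isNil) // string false

	var nilPtr *string
	_, isNil = template.Indirect(reflect.ValueOf(nilPtr))
	fmt.Println(isNil) // true: indirection stopped at a nil pointer
}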
diff --git a/vendor/k8s.io/client-go/third_party/forked/golang/template/funcs.go b/vendor/k8s.io/client-go/third_party/forked/golang/template/funcs.go
new file mode 100644
index 00000000..27a008b0
--- /dev/null
+++ b/vendor/k8s.io/client-go/third_party/forked/golang/template/funcs.go
@@ -0,0 +1,599 @@
+// This package is copied from the Go library text/template.
+// The original private functions eq, ge, gt, le, lt, and ne
+// are exported as public functions.
+package template
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"net/url"
+	"reflect"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+)
+
+var Equal = eq
+var GreaterEqual = ge
+var Greater = gt
+var LessEqual = le
+var Less = lt
+var NotEqual = ne
+
+// FuncMap is the type of the map defining the mapping from names to functions.
+// Each function must have either a single return value, or two return values of
+// which the second has type error. In that case, if the second (error)
+// return value evaluates to non-nil during execution, execution terminates and
+// Execute returns that error.
+type FuncMap map[string]interface{}
+
+var builtins = FuncMap{
+	"and":      and,
+	"call":     call,
+	"html":     HTMLEscaper,
+	"index":    index,
+	"js":       JSEscaper,
+	"len":      length,
+	"not":      not,
+	"or":       or,
+	"print":    fmt.Sprint,
+	"printf":   fmt.Sprintf,
+	"println":  fmt.Sprintln,
+	"urlquery": URLQueryEscaper,
+
+	// Comparisons
+	"eq": eq, // ==
+	"ge": ge, // >=
+	"gt": gt, // >
+	"le": le, // <=
+	"lt": lt, // <
+	"ne": ne, // !=
+}
+
+var builtinFuncs = createValueFuncs(builtins)
+
+// createValueFuncs turns a FuncMap into a map[string]reflect.Value
+func createValueFuncs(funcMap FuncMap) map[string]reflect.Value {
+	m := make(map[string]reflect.Value)
+	addValueFuncs(m, funcMap)
+	return m
+}
+
+// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values.
+func addValueFuncs(out map[string]reflect.Value, in FuncMap) {
+	for name, fn := range in {
+		v := reflect.ValueOf(fn)
+		if v.Kind() != reflect.Func {
+			panic("value for " + name + " not a function")
+		}
+		if !goodFunc(v.Type()) {
+			panic(fmt.Errorf("can't install method/function %q with %d results", name, v.Type().NumOut()))
+		}
+		out[name] = v
+	}
+}
+
+// addFuncs adds to values the functions in funcs. It does no checking of the input -
+// call addValueFuncs first.
+func addFuncs(out, in FuncMap) {
+	for name, fn := range in {
+		out[name] = fn
+	}
+}
+
+// goodFunc checks that the function or method has the right result signature.
+func goodFunc(typ reflect.Type) bool {
+	// We allow functions with 1 result or 2 results where the second is an error.
+	switch {
+	case typ.NumOut() == 1:
+		return true
+	case typ.NumOut() == 2 && typ.Out(1) == errorType:
+		return true
+	}
+	return false
+}
+
+// findFunction looks for a function in the template, and global map.
+func findFunction(name string) (reflect.Value, bool) {
+	if fn := builtinFuncs[name]; fn.IsValid() {
+		return fn, true
+	}
+	return reflect.Value{}, false
+}
+
+// Indexing.
+
+// index returns the result of indexing its first argument by the following
+// arguments.  Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each
+// indexed item must be a map, slice, or array.
+func index(item interface{}, indices ...interface{}) (interface{}, error) {
+	v := reflect.ValueOf(item)
+	for _, i := range indices {
+		index := reflect.ValueOf(i)
+		var isNil bool
+		if v, isNil = indirect(v); isNil {
+			return nil, fmt.Errorf("index of nil pointer")
+		}
+		switch v.Kind() {
+		case reflect.Array, reflect.Slice, reflect.String:
+			var x int64
+			switch index.Kind() {
+			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+				x = index.Int()
+			case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+				x = int64(index.Uint())
+			default:
+				return nil, fmt.Errorf("cannot index slice/array with type %s", index.Type())
+			}
+			if x < 0 || x >= int64(v.Len()) {
+				return nil, fmt.Errorf("index out of range: %d", x)
+			}
+			v = v.Index(int(x))
+		case reflect.Map:
+			if !index.IsValid() {
+				index = reflect.Zero(v.Type().Key())
+			}
+			if !index.Type().AssignableTo(v.Type().Key()) {
+				return nil, fmt.Errorf("%s is not index type for %s", index.Type(), v.Type())
+			}
+			if x := v.MapIndex(index); x.IsValid() {
+				v = x
+			} else {
+				v = reflect.Zero(v.Type().Elem())
+			}
+		default:
+			return nil, fmt.Errorf("can't index item of type %s", v.Type())
+		}
+	}
+	return v.Interface(), nil
+}
+
+// Length
+
+// length returns the length of the item, with an error if it has no defined length.
+func length(item interface{}) (int, error) {
+	v, isNil := indirect(reflect.ValueOf(item))
+	if isNil {
+		return 0, fmt.Errorf("len of nil pointer")
+	}
+	switch v.Kind() {
+	case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
+		return v.Len(), nil
+	}
+	return 0, fmt.Errorf("len of type %s", v.Type())
+}
+
+// Function invocation
+
+// call returns the result of evaluating the first argument as a function.
+// The function must return 1 result, or 2 results, the second of which is an error.
+func call(fn interface{}, args ...interface{}) (interface{}, error) {
+	v := reflect.ValueOf(fn)
+	typ := v.Type()
+	if typ.Kind() != reflect.Func {
+		return nil, fmt.Errorf("non-function of type %s", typ)
+	}
+	if !goodFunc(typ) {
+		return nil, fmt.Errorf("function called with %d args; should be 1 or 2", typ.NumOut())
+	}
+	numIn := typ.NumIn()
+	var dddType reflect.Type
+	if typ.IsVariadic() {
+		if len(args) < numIn-1 {
+			return nil, fmt.Errorf("wrong number of args: got %d want at least %d", len(args), numIn-1)
+		}
+		dddType = typ.In(numIn - 1).Elem()
+	} else {
+		if len(args) != numIn {
+			return nil, fmt.Errorf("wrong number of args: got %d want %d", len(args), numIn)
+		}
+	}
+	argv := make([]reflect.Value, len(args))
+	for i, arg := range args {
+		value := reflect.ValueOf(arg)
+		// Compute the expected type. Clumsy because of variadics.
+		var argType reflect.Type
+		if !typ.IsVariadic() || i < numIn-1 {
+			argType = typ.In(i)
+		} else {
+			argType = dddType
+		}
+		if !value.IsValid() && canBeNil(argType) {
+			value = reflect.Zero(argType)
+		}
+		if !value.Type().AssignableTo(argType) {
+			return nil, fmt.Errorf("arg %d has type %s; should be %s", i, value.Type(), argType)
+		}
+		argv[i] = value
+	}
+	result := v.Call(argv)
+	if len(result) == 2 && !result[1].IsNil() {
+		return result[0].Interface(), result[1].Interface().(error)
+	}
+	return result[0].Interface(), nil
+}
+
+// Boolean logic.
+
+func truth(a interface{}) bool {
+	t, _ := isTrue(reflect.ValueOf(a))
+	return t
+}
+
+// and computes the Boolean AND of its arguments, returning
+// the first false argument it encounters, or the last argument.
+func and(arg0 interface{}, args ...interface{}) interface{} {
+	if !truth(arg0) {
+		return arg0
+	}
+	for i := range args {
+		arg0 = args[i]
+		if !truth(arg0) {
+			break
+		}
+	}
+	return arg0
+}
+
+// or computes the Boolean OR of its arguments, returning
+// the first true argument it encounters, or the last argument.
+func or(arg0 interface{}, args ...interface{}) interface{} {
+	if truth(arg0) {
+		return arg0
+	}
+	for i := range args {
+		arg0 = args[i]
+		if truth(arg0) {
+			break
+		}
+	}
+	return arg0
+}
+
+// not returns the Boolean negation of its argument.
+func not(arg interface{}) (truth bool) {
+	truth, _ = isTrue(reflect.ValueOf(arg))
+	return !truth
+}
+
+// Comparison.
+
+// TODO: Perhaps allow comparison between signed and unsigned integers.
+
+var (
+	errBadComparisonType = errors.New("invalid type for comparison")
+	errBadComparison     = errors.New("incompatible types for comparison")
+	errNoComparison      = errors.New("missing argument for comparison")
+)
+
+type kind int
+
+const (
+	invalidKind kind = iota
+	boolKind
+	complexKind
+	intKind
+	floatKind
+	integerKind
+	stringKind
+	uintKind
+)
+
+func basicKind(v reflect.Value) (kind, error) {
+	switch v.Kind() {
+	case reflect.Bool:
+		return boolKind, nil
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return intKind, nil
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return uintKind, nil
+	case reflect.Float32, reflect.Float64:
+		return floatKind, nil
+	case reflect.Complex64, reflect.Complex128:
+		return complexKind, nil
+	case reflect.String:
+		return stringKind, nil
+	}
+	return invalidKind, errBadComparisonType
+}
+
+// eq evaluates the comparison a == b || a == c || ...
+func eq(arg1 interface{}, arg2 ...interface{}) (bool, error) {
+	v1 := reflect.ValueOf(arg1)
+	k1, err := basicKind(v1)
+	if err != nil {
+		return false, err
+	}
+	if len(arg2) == 0 {
+		return false, errNoComparison
+	}
+	for _, arg := range arg2 {
+		v2 := reflect.ValueOf(arg)
+		k2, err := basicKind(v2)
+		if err != nil {
+			return false, err
+		}
+		truth := false
+		if k1 != k2 {
+			// Special case: Can compare integer values regardless of type's sign.
+			switch {
+			case k1 == intKind && k2 == uintKind:
+				truth = v1.Int() >= 0 && uint64(v1.Int()) == v2.Uint()
+			case k1 == uintKind && k2 == intKind:
+				truth = v2.Int() >= 0 && v1.Uint() == uint64(v2.Int())
+			default:
+				return false, errBadComparison
+			}
+		} else {
+			switch k1 {
+			case boolKind:
+				truth = v1.Bool() == v2.Bool()
+			case complexKind:
+				truth = v1.Complex() == v2.Complex()
+			case floatKind:
+				truth = v1.Float() == v2.Float()
+			case intKind:
+				truth = v1.Int() == v2.Int()
+			case stringKind:
+				truth = v1.String() == v2.String()
+			case uintKind:
+				truth = v1.Uint() == v2.Uint()
+			default:
+				panic("invalid kind")
+			}
+		}
+		if truth {
+			return true, nil
+		}
+	}
+	return false, nil
+}
+
+// ne evaluates the comparison a != b.
+func ne(arg1, arg2 interface{}) (bool, error) {
+	// != is the inverse of ==.
+	equal, err := eq(arg1, arg2)
+	return !equal, err
+}
+
+// lt evaluates the comparison a < b.
+func lt(arg1, arg2 interface{}) (bool, error) {
+	v1 := reflect.ValueOf(arg1)
+	k1, err := basicKind(v1)
+	if err != nil {
+		return false, err
+	}
+	v2 := reflect.ValueOf(arg2)
+	k2, err := basicKind(v2)
+	if err != nil {
+		return false, err
+	}
+	truth := false
+	if k1 != k2 {
+		// Special case: Can compare integer values regardless of type's sign.
+		switch {
+		case k1 == intKind && k2 == uintKind:
+			truth = v1.Int() < 0 || uint64(v1.Int()) < v2.Uint()
+		case k1 == uintKind && k2 == intKind:
+			truth = v2.Int() >= 0 && v1.Uint() < uint64(v2.Int())
+		default:
+			return false, errBadComparison
+		}
+	} else {
+		switch k1 {
+		case boolKind, complexKind:
+			return false, errBadComparisonType
+		case floatKind:
+			truth = v1.Float() < v2.Float()
+		case intKind:
+			truth = v1.Int() < v2.Int()
+		case stringKind:
+			truth = v1.String() < v2.String()
+		case uintKind:
+			truth = v1.Uint() < v2.Uint()
+		default:
+			panic("invalid kind")
+		}
+	}
+	return truth, nil
+}
+
+// le evaluates the comparison a <= b.
+func le(arg1, arg2 interface{}) (bool, error) {
+	// <= is < or ==.
+	lessThan, err := lt(arg1, arg2)
+	if lessThan || err != nil {
+		return lessThan, err
+	}
+	return eq(arg1, arg2)
+}
+
+// gt evaluates the comparison a > b.
+func gt(arg1, arg2 interface{}) (bool, error) {
+	// > is the inverse of <=.
+	lessOrEqual, err := le(arg1, arg2)
+	if err != nil {
+		return false, err
+	}
+	return !lessOrEqual, nil
+}
+
+// ge evaluates the comparison a >= b.
+func ge(arg1, arg2 interface{}) (bool, error) {
+	// >= is the inverse of <.
+	lessThan, err := lt(arg1, arg2)
+	if err != nil {
+		return false, err
+	}
+	return !lessThan, nil
+}
+
+// HTML escaping.
+
+var (
+	htmlQuot = []byte("&#34;") // shorter than "&quot;"
+	htmlApos = []byte("&#39;") // shorter than "&apos;" and apos was not in HTML until HTML5
+	htmlAmp  = []byte("&amp;")
+	htmlLt   = []byte("&lt;")
+	htmlGt   = []byte("&gt;")
+)
+
+// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b.
+func HTMLEscape(w io.Writer, b []byte) {
+	last := 0
+	for i, c := range b {
+		var html []byte
+		switch c {
+		case '"':
+			html = htmlQuot
+		case '\'':
+			html = htmlApos
+		case '&':
+			html = htmlAmp
+		case '<':
+			html = htmlLt
+		case '>':
+			html = htmlGt
+		default:
+			continue
+		}
+		w.Write(b[last:i])
+		w.Write(html)
+		last = i + 1
+	}
+	w.Write(b[last:])
+}
+
+// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s.
+func HTMLEscapeString(s string) string {
+	// Avoid allocation if we can.
+	if strings.IndexAny(s, `'"&<>`) < 0 {
+		return s
+	}
+	var b bytes.Buffer
+	HTMLEscape(&b, []byte(s))
+	return b.String()
+}
+
+// HTMLEscaper returns the escaped HTML equivalent of the textual
+// representation of its arguments.
+func HTMLEscaper(args ...interface{}) string {
+	return HTMLEscapeString(evalArgs(args))
+}
+
+// JavaScript escaping.
+
+var (
+	jsLowUni = []byte(`\u00`)
+	hex      = []byte("0123456789ABCDEF")
+
+	jsBackslash = []byte(`\\`)
+	jsApos      = []byte(`\'`)
+	jsQuot      = []byte(`\"`)
+	jsLt        = []byte(`\x3C`)
+	jsGt        = []byte(`\x3E`)
+)
+
+// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b.
+func JSEscape(w io.Writer, b []byte) {
+	last := 0
+	for i := 0; i < len(b); i++ {
+		c := b[i]
+
+		if !jsIsSpecial(rune(c)) {
+			// fast path: nothing to do
+			continue
+		}
+		w.Write(b[last:i])
+
+		if c < utf8.RuneSelf {
+			// Quotes, slashes and angle brackets get quoted.
+			// Control characters get written as \u00XX.
+			switch c {
+			case '\\':
+				w.Write(jsBackslash)
+			case '\'':
+				w.Write(jsApos)
+			case '"':
+				w.Write(jsQuot)
+			case '<':
+				w.Write(jsLt)
+			case '>':
+				w.Write(jsGt)
+			default:
+				w.Write(jsLowUni)
+				t, b := c>>4, c&0x0f
+				w.Write(hex[t : t+1])
+				w.Write(hex[b : b+1])
+			}
+		} else {
+			// Unicode rune.
+			r, size := utf8.DecodeRune(b[i:])
+			if unicode.IsPrint(r) {
+				w.Write(b[i : i+size])
+			} else {
+				fmt.Fprintf(w, "\\u%04X", r)
+			}
+			i += size - 1
+		}
+		last = i + 1
+	}
+	w.Write(b[last:])
+}
+
+// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s.
+func JSEscapeString(s string) string {
+	// Avoid allocation if we can.
+	if strings.IndexFunc(s, jsIsSpecial) < 0 {
+		return s
+	}
+	var b bytes.Buffer
+	JSEscape(&b, []byte(s))
+	return b.String()
+}
+
+func jsIsSpecial(r rune) bool {
+	switch r {
+	case '\\', '\'', '"', '<', '>':
+		return true
+	}
+	return r < ' ' || utf8.RuneSelf <= r
+}
+
+// JSEscaper returns the escaped JavaScript equivalent of the textual
+// representation of its arguments.
+func JSEscaper(args ...interface{}) string {
+	return JSEscapeString(evalArgs(args))
+}
+
+// URLQueryEscaper returns the escaped value of the textual representation of
+// its arguments in a form suitable for embedding in a URL query.
+func URLQueryEscaper(args ...interface{}) string {
+	return url.QueryEscape(evalArgs(args))
+}
+
+// evalArgs formats the list of arguments into a string. It is therefore equivalent to
+//	fmt.Sprint(args...)
+// except that each argument is indirected (if a pointer), as required,
+// using the same rules as the default string evaluation during template
+// execution.
+func evalArgs(args []interface{}) string {
+	ok := false
+	var s string
+	// Fast path for simple common case.
+	if len(args) == 1 {
+		s, ok = args[0].(string)
+	}
+	if !ok {
+		for i, arg := range args {
+			a, ok := printableValue(reflect.ValueOf(arg))
+			if ok {
+				args[i] = a
+			} // else let fmt do its thing
+		}
+		s = fmt.Sprint(args...)
+	}
+	return s
+}
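The comparison helpers exported at the top of this file (Equal, NotEqual, Less, LessEqual, Greater, GreaterEqual) behave like the text/template builtins, including the special case that lets signed and unsigned integers be compared. A quick sketch with arbitrary values:

package main

import (
	"fmt"

	"k8s.io/client-go/third_party/forked/golang/template"
)

func main() {
	eq, _ := template.Equal(int64(3), uint64(3)) // mixed int/uint comparison is allowed
	lt, _ := template.Less(2, 5)
	_, err := template.Less("a", 1) // incompatible kinds produce an error
	fmt.Println(eq, lt, err)        // true true incompatible types for comparison
}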
diff --git a/vendor/k8s.io/client-go/tools/cache/controller.go b/vendor/k8s.io/client-go/tools/cache/controller.go
index 27a1c52c..5d582119 100644
--- a/vendor/k8s.io/client-go/tools/cache/controller.go
+++ b/vendor/k8s.io/client-go/tools/cache/controller.go
@@ -26,7 +26,16 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 )
 
-// Config contains all the settings for a Controller.
+// This file implements a low-level controller that is used in
+// sharedIndexInformer, which is an implementation of
+// SharedIndexInformer.  Such informers, in turn, are key components
+// in the high level controllers that form the backbone of the
+// Kubernetes control plane.  Look at those for examples, or the
+// example in
+// https://github.com/kubernetes/client-go/tree/master/examples/workqueue
+// .
+
+// Config contains all the settings for one of these low-level controllers.
 type Config struct {
 	// The queue for your objects - has to be a DeltaFIFO due to
 	// assumptions in the implementation. Your Process() function
@@ -36,30 +45,29 @@ type Config struct {
 	// Something that can list and watch your objects.
 	ListerWatcher
 
-	// Something that can process your objects.
+	// Something that can process a popped Deltas.
 	Process ProcessFunc
 
-	// The type of your objects.
+	// ObjectType is an example object of the type this controller is
+	// expected to handle.  Only the type needs to be right, except
+	// that when that is `unstructured.Unstructured` the object's
+	// `"apiVersion"` and `"kind"` must also be right.
 	ObjectType runtime.Object
 
-	// Reprocess everything at least this often.
-	// Note that if it takes longer for you to clear the queue than this
-	// period, you will end up processing items in the order determined
-	// by FIFO.Replace(). Currently, this is random. If this is a
-	// problem, we can change that replacement policy to append new
-	// things to the end of the queue instead of replacing the entire
-	// queue.
+	// FullResyncPeriod is the period at which ShouldResync is considered.
 	FullResyncPeriod time.Duration
 
-	// ShouldResync, if specified, is invoked when the controller's reflector determines the next
-	// periodic sync should occur. If this returns true, it means the reflector should proceed with
-	// the resync.
+	// ShouldResync is periodically used by the reflector to determine
+	// whether to Resync the Queue. If ShouldResync is `nil` or
+	// returns true, it means the reflector should proceed with the
+	// resync.
 	ShouldResync ShouldResyncFunc
 
 	// If true, when Process() returns an error, re-enqueue the object.
 	// TODO: add interface to let you inject a delay/backoff or drop
 	//       the object completely if desired. Pass the object in
-	//       question to this interface as a parameter.
+	//       question to this interface as a parameter.  This is probably moot
+	//       now that this functionality appears at a higher level.
 	RetryOnError bool
 }
 
@@ -71,7 +79,7 @@ type ShouldResyncFunc func() bool
 // ProcessFunc processes a single object.
 type ProcessFunc func(obj interface{}) error
 
-// Controller is a generic controller framework.
+// `*controller` implements Controller
 type controller struct {
 	config         Config
 	reflector      *Reflector
@@ -79,10 +87,22 @@ type controller struct {
 	clock          clock.Clock
 }
 
-// Controller is a generic controller framework.
+// Controller is a low-level controller that is parameterized by a
+// Config and used in sharedIndexInformer.
 type Controller interface {
+	// Run does two things.  One is to construct and run a Reflector
+	// to pump objects/notifications from the Config's ListerWatcher
+	// to the Config's Queue and possibly invoke the occasional Resync
+	// on that Queue.  The other is to repeatedly Pop from the Queue
+	// and process with the Config's ProcessFunc.  Both of these
+	// continue until `stopCh` is closed.
 	Run(stopCh <-chan struct{})
+
+	// HasSynced delegates to the Config's Queue
 	HasSynced() bool
+
+	// LastSyncResourceVersion delegates to the Reflector when there
+	// is one, otherwise returns the empty string
 	LastSyncResourceVersion() string
 }
 
@@ -95,7 +115,7 @@ func New(c *Config) Controller {
 	return ctlr
 }
 
-// Run begins processing items, and will continue until a value is sent down stopCh.
+// Run begins processing items, and will continue until a value is sent down stopCh or it is closed.
 // It's an error to call Run more than once.
 // Run blocks; call via go.
 func (c *controller) Run(stopCh <-chan struct{}) {
@@ -344,7 +364,10 @@ func newInformer(
 	// This will hold incoming changes. Note how we pass clientState in as a
 	// KeyLister, that way resync operations will result in the correct set
 	// of update/delete deltas.
-	fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, clientState)
+	fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{
+		KnownObjects:          clientState,
+		EmitDeltaTypeReplaced: true,
+	})
 
 	cfg := &Config{
 		Queue:            fifo,
@@ -357,7 +380,7 @@ func newInformer(
 			// from oldest to newest
 			for _, d := range obj.(Deltas) {
 				switch d.Type {
-				case Sync, Added, Updated:
+				case Sync, Replaced, Added, Updated:
 					if old, exists, err := clientState.Get(d.Object); err == nil && exists {
 						if err := clientState.Update(d.Object); err != nil {
 							return err
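Because newInformer now opts in to EmitDeltaTypeReplaced, relisted objects arrive as Replaced rather than Sync, and any hand-rolled consumer of a DeltaFIFO configured the same way must handle the new type just as the switch above does. A minimal sketch of such a handler; the package name and log output are illustrative.

package informerexample

import (
	"fmt"

	"k8s.io/client-go/tools/cache"
)

// processDeltas mirrors the switch in newInformer: Replaced is treated
// like Sync/Added/Updated, and Deleted is handled separately.
func processDeltas(obj interface{}) error {
	for _, d := range obj.(cache.Deltas) {
		switch d.Type {
		case cache.Sync, cache.Replaced, cache.Added, cache.Updated:
			fmt.Printf("upsert: %v\n", d.Object)
		case cache.Deleted:
			fmt.Printf("delete: %v\n", d.Object)
		}
	}
	return nil
}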
diff --git a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go
index 55ecdcdf..40b6022c 100644
--- a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go
+++ b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go
@@ -26,15 +26,16 @@ import (
 	"k8s.io/klog"
 )
 
-// NewDeltaFIFO returns a Store which can be used process changes to items.
+// NewDeltaFIFO returns a Queue which can be used to process changes to items.
 //
-// keyFunc is used to figure out what key an object should have. (It's
-// exposed in the returned DeltaFIFO's KeyOf() method, with bonus features.)
+// keyFunc is used to figure out what key an object should have. (It is
+// exposed in the returned DeltaFIFO's KeyOf() method, with additional handling
+// around deleted objects and queue state).
+//
+// 'knownObjects' may be supplied to modify the behavior of Delete,
+// Replace, and Resync.  It may be nil if you do not need those
+// modifications.
 //
-// 'keyLister' is expected to return a list of keys that the consumer of
-// this queue "knows about". It is used to decide which items are missing
-// when Replace() is called; 'Deleted' deltas are produced for these items.
-// It may be nil if you don't need to detect all deletions.
 // TODO: consider merging keyLister with this object, tracking a list of
 //       "known" keys when Pop() is called. Have to think about how that
 //       affects error retrying.
@@ -56,18 +57,79 @@ import (
 //       and internal tests.
 //
 // Also see the comment on DeltaFIFO.
+//
+// Warning: This constructs a DeltaFIFO that does not differentiate between
+// events caused by a call to Replace (e.g., from a relist, which may
+// contain object updates), and synthetic events caused by a periodic resync
+// (which just emit the existing object). See https://issue.k8s.io/86015 for details.
+//
+// Use `NewDeltaFIFOWithOptions(DeltaFIFOOptions{..., EmitDeltaTypeReplaced: true})`
+// instead to receive a `Replaced` event depending on the type.
+//
+// Deprecated: Equivalent to NewDeltaFIFOWithOptions(DeltaFIFOOptions{KeyFunction: keyFunc, KnownObjects: knownObjects})
 func NewDeltaFIFO(keyFunc KeyFunc, knownObjects KeyListerGetter) *DeltaFIFO {
+	return NewDeltaFIFOWithOptions(DeltaFIFOOptions{
+		KeyFunction:  keyFunc,
+		KnownObjects: knownObjects,
+	})
+}
+
+// DeltaFIFOOptions holds the configuration parameters for DeltaFIFO. All are
+// optional.
+type DeltaFIFOOptions struct {
+
+	// KeyFunction is used to figure out what key an object should have. (It's
+	// exposed in the returned DeltaFIFO's KeyOf() method, with additional
+	// handling around deleted objects and queue state).
+	// Optional, the default is MetaNamespaceKeyFunc.
+	KeyFunction KeyFunc
+
+	// KnownObjects is expected to return a list of keys that the consumer of
+	// this queue "knows about". It is used to decide which items are missing
+	// when Replace() is called; 'Deleted' deltas are produced for the missing items.
+	// KnownObjects may be nil if you can tolerate missing deletions on Replace().
+	KnownObjects KeyListerGetter
+
+	// EmitDeltaTypeReplaced indicates that the queue consumer
+	// understands the Replaced DeltaType. Before the `Replaced` event type was
+	// added, calls to Replace() were handled the same as Sync(). For
+	// backwards-compatibility purposes, this is false by default.
+	// When true, `Replaced` events will be sent for items passed to a Replace() call.
+	// When false, `Sync` events will be sent instead.
+	EmitDeltaTypeReplaced bool
+}
+
+// NewDeltaFIFOWithOptions returns a Store which can be used to process changes to
+// items. See also the comment on DeltaFIFO.
+func NewDeltaFIFOWithOptions(opts DeltaFIFOOptions) *DeltaFIFO {
+	if opts.KeyFunction == nil {
+		opts.KeyFunction = MetaNamespaceKeyFunc
+	}
+
 	f := &DeltaFIFO{
 		items:        map[string]Deltas{},
 		queue:        []string{},
-		keyFunc:      keyFunc,
-		knownObjects: knownObjects,
+		keyFunc:      opts.KeyFunction,
+		knownObjects: opts.KnownObjects,
+
+		emitDeltaTypeReplaced: opts.EmitDeltaTypeReplaced,
 	}
 	f.cond.L = &f.lock
 	return f
 }
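DeltaFIFOOptions replaces the old positional arguments to NewDeltaFIFO. A minimal construction sketch: the Store used for KnownObjects is only an example (nil is also legal), and the key function shown is the default, spelled out for clarity.

package main

import (
	"k8s.io/client-go/tools/cache"
)

func main() {
	// Known objects usually come from an informer's Indexer/Store.
	knownObjects := cache.NewStore(cache.MetaNamespaceKeyFunc)

	fifo := cache.NewDeltaFIFOWithOptions(cache.DeltaFIFOOptions{
		KeyFunction:           cache.MetaNamespaceKeyFunc, // default if omitted
		KnownObjects:          knownObjects,
		EmitDeltaTypeReplaced: true, // opt in to the Replaced delta type
	})
	_ = fifo
}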
 
-// DeltaFIFO is like FIFO, but allows you to process deletes.
+// DeltaFIFO is like FIFO, but differs in two ways.  One is that the
+// accumulator associated with a given object's key is not that object
+// but rather a Deltas, which is a slice of Delta values for that
+// object.  Applying an object to a Deltas means to append a Delta
+// except when the potentially appended Delta is a Deleted and the
+// Deltas already ends with a Deleted.  In that case the Deltas does
+// not grow, although the terminal Deleted will be replaced by the new
+// Deleted if the older Deleted's object is a
+// DeletedFinalStateUnknown.
+//
+// The other difference is that DeltaFIFO has an additional way that
+// an object can be applied to an accumulator, called Sync.
 //
 // DeltaFIFO is a producer-consumer queue, where a Reflector is
 // intended to be the producer, and the consumer is whatever calls
@@ -77,22 +139,22 @@ func NewDeltaFIFO(keyFunc KeyFunc, knownObjects KeyListerGetter) *DeltaFIFO {
 //  * You want to process every object change (delta) at most once.
 //  * When you process an object, you want to see everything
 //    that's happened to it since you last processed it.
-//  * You want to process the deletion of objects.
+//  * You want to process the deletion of some of the objects.
 //  * You might want to periodically reprocess objects.
 //
 // DeltaFIFO's Pop(), Get(), and GetByKey() methods return
-// interface{} to satisfy the Store/Queue interfaces, but it
+// interface{} to satisfy the Store/Queue interfaces, but they
 // will always return an object of type Deltas.
 //
+// A DeltaFIFO's knownObjects KeyListerGetter provides the abilities
+// to list Store keys and to get objects by Store key.  The objects in
+// question are called "known objects" and this set of objects
+// modifies the behavior of the Delete, Replace, and Resync methods
+// (each in a different way).
+//
 // A note on threading: If you call Pop() in parallel from multiple
 // threads, you could end up with multiple threads processing slightly
 // different versions of the same object.
-//
-// A note on the KeyLister used by the DeltaFIFO: It's main purpose is
-// to list keys that are "known", for the purpose of figuring out which
-// items have been deleted when Replace() or Delete() are called. The deleted
-// object will be included in the DeleteFinalStateUnknown markers. These objects
-// could be stale.
 type DeltaFIFO struct {
 	// lock/cond protects access to 'items' and 'queue'.
 	lock sync.RWMutex
@@ -114,9 +176,8 @@ type DeltaFIFO struct {
 	// insertion and retrieval, and should be deterministic.
 	keyFunc KeyFunc
 
-	// knownObjects list keys that are "known", for the
-	// purpose of figuring out which items have been deleted
-	// when Replace() or Delete() is called.
+	// knownObjects lists keys that are "known" --- affecting Delete(),
+	// Replace(), and Resync().
 	knownObjects KeyListerGetter
 
 	// Indication the queue is closed.
@@ -124,6 +185,10 @@ type DeltaFIFO struct {
 	// Currently, not used to gate any of CRUD operations.
 	closed     bool
 	closedLock sync.Mutex
+
+	// emitDeltaTypeReplaced is whether to emit the Replaced or Sync
+	// DeltaType when Replace() is called (to preserve backwards compat).
+	emitDeltaTypeReplaced bool
 }
 
 var (
@@ -185,9 +250,11 @@ func (f *DeltaFIFO) Update(obj interface{}) error {
 	return f.queueActionLocked(Updated, obj)
 }
 
-// Delete is just like Add, but makes an Deleted Delta. If the item does not
-// already exist, it will be ignored. (It may have already been deleted by a
-// Replace (re-list), for example.
+// Delete is just like Add, but makes a Deleted Delta. If the given
+// object does not already exist, it will be ignored. (It may have
+// already been deleted by a Replace (re-list), for example.)  In this
+// method `f.knownObjects`, if not nil, provides (via GetByKey)
+// _additional_ objects that are considered to already exist.
 func (f *DeltaFIFO) Delete(obj interface{}) error {
 	id, err := f.KeyOf(obj)
 	if err != nil {
@@ -313,6 +380,9 @@ func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) err
 		f.items[id] = newDeltas
 		f.cond.Broadcast()
 	} else {
+		// This never happens, because dedupDeltas never returns an empty list
+		// when given a non-empty list (as it is here).
+		// But if somehow it ever does return an empty list, then
 		// We need to remove this from our map (extra items in the queue are
 		// ignored if they are not in the map).
 		delete(f.items, id)
@@ -430,22 +500,34 @@ func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) {
 	}
 }
 
-// Replace will delete the contents of 'f', using instead the given map.
-// 'f' takes ownership of the map, you should not reference the map again
-// after calling this function. f's queue is reset, too; upon return, it
-// will contain the items in the map, in no particular order.
+// Replace atomically does two things: (1) it adds the given objects
+// using the Sync or Replace DeltaType and then (2) it does some deletions.
+// In particular: for every pre-existing key K that is not the key of
+// an object in `list` there is the effect of
+// `Delete(DeletedFinalStateUnknown{K, O})` where O is current object
+// of K.  If `f.knownObjects == nil` then the pre-existing keys are
+// those in `f.items` and the current object of K is the `.Newest()`
+// of the Deltas associated with K.  Otherwise the pre-existing keys
+// are those listed by `f.knownObjects` and the current object of K is
+// what `f.knownObjects.GetByKey(K)` returns.
 func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error {
 	f.lock.Lock()
 	defer f.lock.Unlock()
 	keys := make(sets.String, len(list))
 
+	// keep backwards compat for old clients
+	action := Sync
+	if f.emitDeltaTypeReplaced {
+		action = Replaced
+	}
+
 	for _, item := range list {
 		key, err := f.KeyOf(item)
 		if err != nil {
 			return KeyError{item, err}
 		}
 		keys.Insert(key)
-		if err := f.queueActionLocked(Sync, item); err != nil {
+		if err := f.queueActionLocked(action, item); err != nil {
 			return fmt.Errorf("couldn't enqueue object: %v", err)
 		}
 	}
@@ -507,7 +589,9 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error {
 	return nil
 }
 
-// Resync will send a sync event for each item
+// Resync adds, with a Sync type of Delta, every object listed by
+// `f.knownObjects` whose key is not already queued for processing.
+// If `f.knownObjects` is `nil` then Resync does nothing.
 func (f *DeltaFIFO) Resync() error {
 	f.lock.Lock()
 	defer f.lock.Unlock()
@@ -577,10 +661,14 @@ const (
 	Added   DeltaType = "Added"
 	Updated DeltaType = "Updated"
 	Deleted DeltaType = "Deleted"
-	// The other types are obvious. You'll get Sync deltas when:
-	//  * A watch expires/errors out and a new list/watch cycle is started.
-	//  * You've turned on periodic syncs.
-	// (Anything that trigger's DeltaFIFO's Replace() method.)
+	// Replaced is emitted when we encountered watch errors and had to do a
+	// relist. We don't know if the replaced object has changed.
+	//
+	// NOTE: Previous versions of DeltaFIFO would use Sync for Replace events
+	// as well. Hence, Replaced is only emitted when the option
+	// EmitDeltaTypeReplaced is true.
+	Replaced DeltaType = "Replaced"
+	// Sync is for synthetic events during a periodic resync.
 	Sync DeltaType = "Sync"
 )
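
To make the new options-based construction and the Replaced delta type concrete, here is a minimal, hypothetical usage sketch; the pod fixture, resource version, and printed output are illustrative and not part of this change:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// knownObjects plays the role of an informer's local cache; it modifies the
	// behavior of Delete, Replace, and Resync as described above.
	knownObjects := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})

	fifo := cache.NewDeltaFIFOWithOptions(cache.DeltaFIFOOptions{
		KnownObjects:          knownObjects,
		EmitDeltaTypeReplaced: true, // opt in to the Replaced delta type
	})

	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "example"}}
	if err := fifo.Replace([]interface{}{pod}, "1"); err != nil {
		panic(err)
	}

	// Pop hands the accumulated Deltas for one key to the process function.
	if _, err := fifo.Pop(func(obj interface{}) error {
		for _, d := range obj.(cache.Deltas) {
			fmt.Println(d.Type) // prints "Replaced" because EmitDeltaTypeReplaced is true
		}
		return nil
	}); err != nil {
		panic(err)
	}
}

With EmitDeltaTypeReplaced left at its default of false, the same Replace() call would enqueue a Sync delta instead, preserving the pre-existing behavior.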
 
diff --git a/vendor/k8s.io/client-go/tools/cache/expiration_cache.go b/vendor/k8s.io/client-go/tools/cache/expiration_cache.go
index 14ad492e..e687593f 100644
--- a/vendor/k8s.io/client-go/tools/cache/expiration_cache.go
+++ b/vendor/k8s.io/client-go/tools/cache/expiration_cache.go
@@ -194,9 +194,9 @@ func (c *ExpirationCache) Replace(list []interface{}, resourceVersion string) er
 	return nil
 }
 
-// Resync will touch all objects to put them into the processing queue
+// Resync is a no-op for an ExpirationCache.
 func (c *ExpirationCache) Resync() error {
-	return c.cacheStorage.Resync()
+	return nil
 }
 
 // NewTTLStore creates and returns a ExpirationCache with a TTLPolicy
diff --git a/vendor/k8s.io/client-go/tools/cache/fifo.go b/vendor/k8s.io/client-go/tools/cache/fifo.go
index 7a3bc3d3..67bb1cba 100644
--- a/vendor/k8s.io/client-go/tools/cache/fifo.go
+++ b/vendor/k8s.io/client-go/tools/cache/fifo.go
@@ -24,7 +24,7 @@ import (
 )
 
 // PopProcessFunc is passed to Pop() method of Queue interface.
-// It is supposed to process the element popped from the queue.
+// It is supposed to process the accumulator popped from the queue.
 type PopProcessFunc func(interface{}) error
 
 // ErrRequeue may be returned by a PopProcessFunc to safely requeue
@@ -44,26 +44,38 @@ func (e ErrRequeue) Error() string {
 	return e.Err.Error()
 }
 
-// Queue is exactly like a Store, but has a Pop() method too.
+// Queue extends Store with a collection of Store keys to "process".
+// Every Add, Update, or Delete may put the object's key in that collection.
+// A Queue has a way to derive the corresponding key given an accumulator.
+// A Queue can be accessed concurrently from multiple goroutines.
+// A Queue can be "closed", after which Pop operations return an error.
 type Queue interface {
 	Store
 
-	// Pop blocks until it has something to process.
-	// It returns the object that was process and the result of processing.
-	// The PopProcessFunc may return an ErrRequeue{...} to indicate the item
-	// should be requeued before releasing the lock on the queue.
+	// Pop blocks until there is at least one key to process or the
+	// Queue is closed.  In the latter case Pop returns with an error.
+	// In the former case Pop atomically picks one key to process,
+	// removes that (key, accumulator) association from the Store, and
+	// processes the accumulator.  Pop returns the accumulator that
+	// was processed and the result of processing.  The PopProcessFunc
+	// may return an ErrRequeue{inner} and in this case Pop will (a)
+	// return that (key, accumulator) association to the Queue as part
+	// of the atomic processing and (b) return the inner error from
+	// Pop.
 	Pop(PopProcessFunc) (interface{}, error)
 
-	// AddIfNotPresent adds a value previously
-	// returned by Pop back into the queue as long
-	// as nothing else (presumably more recent)
-	// has since been added.
+	// AddIfNotPresent puts the given accumulator into the Queue (in
+	// association with the accumulator's key) if and only if that key
+	// is not already associated with a non-empty accumulator.
 	AddIfNotPresent(interface{}) error
 
-	// HasSynced returns true if the first batch of items has been popped
+	// HasSynced returns true if the first batch of keys have all been
+	// popped.  The first batch of keys are those of the first Replace
+	// operation if that happened before any Add, Update, or Delete;
+	// otherwise the first batch is empty.
 	HasSynced() bool
 
-	// Close queue
+	// Close the queue
 	Close()
 }
 
@@ -79,11 +91,16 @@ func Pop(queue Queue) interface{} {
 	return result
 }
 
-// FIFO receives adds and updates from a Reflector, and puts them in a queue for
-// FIFO order processing. If multiple adds/updates of a single item happen while
-// an item is in the queue before it has been processed, it will only be
-// processed once, and when it is processed, the most recent version will be
-// processed. This can't be done with a channel.
+// FIFO is a Queue in which (a) each accumulator is simply the most
+// recently provided object and (b) the collection of keys to process
+// is a FIFO.  The accumulators all start out empty, and deleting an
+// object from its accumulator empties the accumulator.  The Resync
+// operation is a no-op.
+//
+// Thus: if multiple adds/updates of a single object happen while that
+// object's key is in the queue before it has been processed then it
+// will only be processed once, and when it is processed the most
+// recent version will be processed. This can't be done with a channel.
 //
 // FIFO solves this use case:
 //  * You want to process every object (exactly) once.
@@ -94,7 +111,7 @@ func Pop(queue Queue) interface{} {
 type FIFO struct {
 	lock sync.RWMutex
 	cond sync.Cond
-	// We depend on the property that items in the set are in the queue and vice versa.
+	// We depend on the property that every key in `items` is also in `queue`
 	items map[string]interface{}
 	queue []string
 
@@ -326,7 +343,8 @@ func (f *FIFO) Replace(list []interface{}, resourceVersion string) error {
 	return nil
 }
 
-// Resync will touch all objects to put them into the processing queue
+// Resync will ensure that every object in the Store has its key in the queue.
+// This should be a no-op, because that property is maintained by all operations.
 func (f *FIFO) Resync() error {
 	f.lock.Lock()
 	defer f.lock.Unlock()
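
The reworked Queue and FIFO comments above describe the accumulator model and the ErrRequeue contract of Pop. A small, hypothetical sketch of that contract follows; the pod and the readiness check are illustrative only:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	f := cache.NewFIFO(cache.MetaNamespaceKeyFunc)

	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "example"}}
	if err := f.Add(pod); err != nil {
		panic(err)
	}

	// For a plain FIFO the accumulator is simply the most recently provided object.
	_, err := f.Pop(func(obj interface{}) error {
		p := obj.(*v1.Pod)
		if p.Status.Phase == "" {
			// Returning ErrRequeue puts the (key, accumulator) association back
			// into the queue as part of the same atomic Pop.
			return cache.ErrRequeue{Err: fmt.Errorf("pod %s not ready yet", p.Name)}
		}
		return nil
	})
	fmt.Println(err) // Pop returns the inner error when ErrRequeue is used
}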
diff --git a/vendor/k8s.io/client-go/tools/cache/index.go b/vendor/k8s.io/client-go/tools/cache/index.go
index bbfb3b55..fa29e6a7 100644
--- a/vendor/k8s.io/client-go/tools/cache/index.go
+++ b/vendor/k8s.io/client-go/tools/cache/index.go
@@ -23,12 +23,15 @@ import (
 	"k8s.io/apimachinery/pkg/util/sets"
 )
 
-// Indexer is a storage interface that lets you list objects using multiple indexing functions.
-// There are three kinds of strings here.
-// One is a storage key, as defined in the Store interface.
-// Another kind is a name of an index.
-// The third kind of string is an "indexed value", which is produced by an
-// IndexFunc and can be a field value or any other string computed from the object.
+// Indexer extends Store with multiple indices and restricts each
+// accumulator to simply hold the current object (and be empty after
+// Delete).
+//
+// There are three kinds of strings here:
+// 1. a storage key, as defined in the Store interface,
+// 2. a name of an index, and
+// 3. an "indexed value", which is produced by an IndexFunc and
+//    can be a field value or any other string computed from the object.
 type Indexer interface {
 	Store
 	// Index returns the stored objects whose set of indexed values
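
To illustrate the three kinds of strings named in the Indexer comment, here is a small, hypothetical sketch using the namespace index shipped with this package; the pod, namespace, and expected output are illustrative assumptions:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
		cache.NamespaceIndex: cache.MetaNamespaceIndexFunc, // index name -> IndexFunc
	})

	_ = indexer.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "example"}})

	// ByIndex looks up by (index name, indexed value) and returns the stored objects.
	objs, _ := indexer.ByIndex(cache.NamespaceIndex, "default")
	fmt.Println(len(objs)) // 1

	// IndexKeys returns the Store keys rather than the objects themselves.
	keys, _ := indexer.IndexKeys(cache.NamespaceIndex, "default")
	fmt.Println(keys) // [default/example]
}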
diff --git a/vendor/k8s.io/client-go/tools/cache/listwatch.go b/vendor/k8s.io/client-go/tools/cache/listwatch.go
index 8227b73b..10b7e651 100644
--- a/vendor/k8s.io/client-go/tools/cache/listwatch.go
+++ b/vendor/k8s.io/client-go/tools/cache/listwatch.go
@@ -24,7 +24,6 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/watch"
 	restclient "k8s.io/client-go/rest"
-	"k8s.io/client-go/tools/pager"
 )
 
 // Lister is any object that knows how to perform an initial list.
@@ -85,7 +84,7 @@ func NewFilteredListWatchFromClient(c Getter, resource string, namespace string,
 			Namespace(namespace).
 			Resource(resource).
 			VersionedParams(&options, metav1.ParameterCodec).
-			Do().
+			Do(context.TODO()).
 			Get()
 	}
 	watchFunc := func(options metav1.ListOptions) (watch.Interface, error) {
@@ -95,16 +94,15 @@ func NewFilteredListWatchFromClient(c Getter, resource string, namespace string,
 			Namespace(namespace).
 			Resource(resource).
 			VersionedParams(&options, metav1.ParameterCodec).
-			Watch()
+			Watch(context.TODO())
 	}
 	return &ListWatch{ListFunc: listFunc, WatchFunc: watchFunc}
 }
 
 // List a set of apiserver resources
 func (lw *ListWatch) List(options metav1.ListOptions) (runtime.Object, error) {
-	if !lw.DisableChunking {
-		return pager.New(pager.SimplePageFunc(lw.ListFunc)).List(context.TODO(), options)
-	}
+	// ListWatch is used in Reflector, which already supports pagination.
+	// Don't paginate here to avoid duplication.
 	return lw.ListFunc(options)
 }
 
diff --git a/vendor/k8s.io/client-go/tools/cache/mutation_detector.go b/vendor/k8s.io/client-go/tools/cache/mutation_detector.go
index fa6acab3..bbec7d06 100644
--- a/vendor/k8s.io/client-go/tools/cache/mutation_detector.go
+++ b/vendor/k8s.io/client-go/tools/cache/mutation_detector.go
@@ -36,9 +36,12 @@ func init() {
 	mutationDetectionEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_CACHE_MUTATION_DETECTOR"))
 }
 
-// MutationDetector is able to monitor if the object be modified outside.
+// MutationDetector is able to monitor objects for mutation within a limited window of time
 type MutationDetector interface {
+	// AddObject adds the given object to the set being monitored for a while from now
 	AddObject(obj interface{})
+
+	// Run starts the monitoring and does not return until the monitoring is stopped.
 	Run(stopCh <-chan struct{})
 }
 
@@ -65,7 +68,13 @@ type defaultCacheMutationDetector struct {
 	name   string
 	period time.Duration
 
-	lock       sync.Mutex
+	// compareObjectsLock ensures only a single call to CompareObjects runs at a time
+	compareObjectsLock sync.Mutex
+
+	// addedObjsLock guards addedObjs between AddObject and CompareObjects
+	addedObjsLock sync.Mutex
+	addedObjs     []cacheObj
+
 	cachedObjs []cacheObj
 
 	retainDuration     time.Duration
@@ -115,15 +124,22 @@ func (d *defaultCacheMutationDetector) AddObject(obj interface{}) {
 	if obj, ok := obj.(runtime.Object); ok {
 		copiedObj := obj.DeepCopyObject()
 
-		d.lock.Lock()
-		defer d.lock.Unlock()
-		d.cachedObjs = append(d.cachedObjs, cacheObj{cached: obj, copied: copiedObj})
+		d.addedObjsLock.Lock()
+		defer d.addedObjsLock.Unlock()
+		d.addedObjs = append(d.addedObjs, cacheObj{cached: obj, copied: copiedObj})
 	}
 }
 
 func (d *defaultCacheMutationDetector) CompareObjects() {
-	d.lock.Lock()
-	defer d.lock.Unlock()
+	d.compareObjectsLock.Lock()
+	defer d.compareObjectsLock.Unlock()
+
+	// move addedObjs into cachedObjs under lock
+	// this keeps the critical section small to avoid blocking AddObject while we compare cachedObjs
+	d.addedObjsLock.Lock()
+	d.cachedObjs = append(d.cachedObjs, d.addedObjs...)
+	d.addedObjs = nil
+	d.addedObjsLock.Unlock()
 
 	altered := false
 	for i, obj := range d.cachedObjs {
diff --git a/vendor/k8s.io/client-go/tools/cache/reflector.go b/vendor/k8s.io/client-go/tools/cache/reflector.go
index 62749ed7..dfdc2e73 100644
--- a/vendor/k8s.io/client-go/tools/cache/reflector.go
+++ b/vendor/k8s.io/client-go/tools/cache/reflector.go
@@ -26,7 +26,7 @@ import (
 	"sync"
 	"time"
 
-	apierrs "k8s.io/apimachinery/pkg/api/errors"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -55,7 +55,10 @@ type Reflector struct {
 	// stringification of expectedType otherwise. It is for display
 	// only, and should not be used for parsing or comparison.
 	expectedTypeName string
-	// The type of object we expect to place in the store.
+	// An example object of the type we expect to place in the store.
+	// Only the type needs to be right, except that when that is
+	// `unstructured.Unstructured` the object's `"apiVersion"` and
+	// `"kind"` must also be right.
 	expectedType reflect.Type
 	// The GVK of the object we expect to place in the store if unstructured.
 	expectedGVK *schema.GroupVersionKind
@@ -63,21 +66,34 @@ type Reflector struct {
 	store Store
 	// listerWatcher is used to perform lists and watches.
 	listerWatcher ListerWatcher
-	// period controls timing between one watch ending and
-	// the beginning of the next one.
-	period       time.Duration
+
+	// backoff manages backoff of ListWatch
+	backoffManager wait.BackoffManager
+
 	resyncPeriod time.Duration
+	// ShouldResync is invoked periodically and whenever it returns `true` the Store's Resync operation is invoked
 	ShouldResync func() bool
 	// clock allows tests to manipulate time
 	clock clock.Clock
+	// paginatedResult defines whether pagination should be forced for list calls.
+	// It is set based on the result of the initial list call.
+	paginatedResult bool
 	// lastSyncResourceVersion is the resource version token last
 	// observed when doing a sync with the underlying store
 	// it is thread safe, but not synchronized with the underlying store
 	lastSyncResourceVersion string
+	// isLastSyncResourceVersionGone is true if the previous list or watch request with lastSyncResourceVersion
+	// failed with an HTTP 410 (Gone) status code.
+	isLastSyncResourceVersionGone bool
 	// lastSyncResourceVersionMutex guards read/write access to lastSyncResourceVersion
 	lastSyncResourceVersionMutex sync.RWMutex
 	// WatchListPageSize is the requested chunk size of initial and resync watch lists.
-	// Defaults to pager.PageSize.
+	// If unset, for consistent reads (RV="") or reads that opt into arbitrarily old data
+	// (RV="0") it will default to pager.PageSize, for the rest (RV != "" && RV != "0")
+	// it will turn off pagination to allow serving them from watch cache.
+	// NOTE: It should be used carefully as paginated lists are always served directly from
+	// etcd, which is significantly less efficient and may lead to serious performance and
+	// scalability problems.
 	WatchListPageSize int64
 }
 
@@ -95,25 +111,33 @@ func NewNamespaceKeyedIndexerAndReflector(lw ListerWatcher, expectedType interfa
 	return indexer, reflector
 }
 
-// NewReflector creates a new Reflector object which will keep the given store up to
-// date with the server's contents for the given resource. Reflector promises to
-// only put things in the store that have the type of expectedType, unless expectedType
-// is nil. If resyncPeriod is non-zero, then lists will be executed after every
-// resyncPeriod, so that you can use reflectors to periodically process everything as
-// well as incrementally processing the things that change.
+// NewReflector creates a new Reflector object which will keep the
+// given store up to date with the server's contents for the given
+// resource. Reflector promises to only put things in the store that
+// have the type of expectedType, unless expectedType is nil. If
+// resyncPeriod is non-zero, then the reflector will periodically
+// consult its ShouldResync function to determine whether to invoke
+// the Store's Resync operation; `ShouldResync==nil` means always
+// "yes".  This enables you to use reflectors to periodically process
+// everything as well as incrementally processing the things that
+// change.
 func NewReflector(lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector {
 	return NewNamedReflector(naming.GetNameFromCallsite(internalPackages...), lw, expectedType, store, resyncPeriod)
 }
 
 // NewNamedReflector same as NewReflector, but with a specified name for logging
 func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector {
+	realClock := &clock.RealClock{}
 	r := &Reflector{
 		name:          name,
 		listerWatcher: lw,
 		store:         store,
-		period:        time.Second,
-		resyncPeriod:  resyncPeriod,
-		clock:         &clock.RealClock{},
+		// We used to make the call every 1sec (1 QPS), the goal here is to achieve ~98% traffic reduction when
+		// API server is not healthy. With these parameters, backoff will stop at [30,60) sec interval which is
+		// 0.22 QPS. If we don't backoff for 2min, assume API server is healthy and we reset the backoff.
+		backoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, realClock),
+		resyncPeriod:   resyncPeriod,
+		clock:          realClock,
 	}
 	r.setExpectedType(expectedType)
 	return r
@@ -144,15 +168,17 @@ func (r *Reflector) setExpectedType(expectedType interface{}) {
 // call chains to NewReflector, so they'd be low entropy names for reflectors
 var internalPackages = []string{"client-go/tools/cache/"}
 
-// Run starts a watch and handles watch events. Will restart the watch if it is closed.
+// Run repeatedly uses the reflector's ListAndWatch to fetch all the
+// objects and subsequent deltas.
 // Run will exit when stopCh is closed.
 func (r *Reflector) Run(stopCh <-chan struct{}) {
-	klog.V(3).Infof("Starting reflector %v (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name)
-	wait.Until(func() {
+	klog.V(2).Infof("Starting reflector %s (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name)
+	wait.BackoffUntil(func() {
 		if err := r.ListAndWatch(stopCh); err != nil {
 			utilruntime.HandleError(err)
 		}
-	}, r.period, stopCh)
+	}, r.backoffManager, true, stopCh)
+	klog.V(2).Infof("Stopping reflector %s (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name)
 }
 
 var (
@@ -185,15 +211,13 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
 	klog.V(3).Infof("Listing and watching %v from %s", r.expectedTypeName, r.name)
 	var resourceVersion string
 
-	// Explicitly set "0" as resource version - it's fine for the List()
-	// to be served from cache and potentially be delayed relative to
-	// etcd contents. Reflector framework will catch up via Watch() eventually.
-	options := metav1.ListOptions{ResourceVersion: "0"}
+	options := metav1.ListOptions{ResourceVersion: r.relistResourceVersion()}
 
 	if err := func() error {
 		initTrace := trace.New("Reflector ListAndWatch", trace.Field{"name", r.name})
 		defer initTrace.LogIfLong(10 * time.Second)
 		var list runtime.Object
+		var paginatedResult bool
 		var err error
 		listCh := make(chan struct{}, 1)
 		panicCh := make(chan interface{}, 1)
@@ -208,11 +232,39 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
 			pager := pager.New(pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) {
 				return r.listerWatcher.List(opts)
 			}))
-			if r.WatchListPageSize != 0 {
+			switch {
+			case r.WatchListPageSize != 0:
 				pager.PageSize = r.WatchListPageSize
+			case r.paginatedResult:
+				// We got a paginated result initially. Assume this resource and server honor
+				// paging requests (i.e. watch cache is probably disabled) and leave the default
+				// pager size set.
+			case options.ResourceVersion != "" && options.ResourceVersion != "0":
+				// User didn't explicitly request pagination.
+				//
+				// With ResourceVersion != "", we have a possibility to list from watch cache,
+				// but we do that (for ResourceVersion != "0") only if Limit is unset.
+				// To avoid thundering herd on etcd (e.g. on master upgrades), we explicitly
+				// switch off pagination to force listing from watch cache (if enabled).
+				// With the existing semantic of RV (result is at least as fresh as provided RV),
+				// this is correct and doesn't lead to going back in time.
+				//
+				// We also don't turn off pagination for ResourceVersion="0", since watch cache
+				// is ignoring Limit in that case anyway, and if watch cache is not enabled
+				// we don't introduce regression.
+				pager.PageSize = 0
+			}
+
+			list, paginatedResult, err = pager.List(context.Background(), options)
+			if isExpiredError(err) {
+				r.setIsLastSyncResourceVersionExpired(true)
+				// Retry immediately if the resource version used to list is expired.
+				// The pager already falls back to full list if paginated list calls fail due to an "Expired" error on
+				// continuation pages, but the pager might not be enabled, or the full list might fail because the
+				// resource version it is listing at is expired, so we need to fall back to resourceVersion=""
+				// in all cases to recover and ensure the reflector makes forward progress.
+				list, paginatedResult, err = pager.List(context.Background(), metav1.ListOptions{ResourceVersion: r.relistResourceVersion()})
 			}
-			// Pager falls back to full list if paginated list calls fail due to an "Expired" error.
-			list, err = pager.List(context.Background(), options)
 			close(listCh)
 		}()
 		select {
@@ -225,6 +277,22 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
 		if err != nil {
 			return fmt.Errorf("%s: Failed to list %v: %v", r.name, r.expectedTypeName, err)
 		}
+
+		// We check if the list was paginated and if so set the paginatedResult based on that.
+		// However, we want to do that only for the initial list (which is the only case
+		// when we set ResourceVersion="0"). The reasoning behind it is that later, in some
+		// situations we may force listing directly from etcd (by setting ResourceVersion="")
+		// which will return paginated result, even if watch cache is enabled. However, in
+		// that case, we still want to prefer sending requests to watch cache if possible.
+		//
+		// A paginated result returned for a request with ResourceVersion="0" means that the watch
+		// cache is disabled and there are a lot of objects of a given type. In such a case,
+		// there is no need to prefer listing from watch cache.
+		if options.ResourceVersion == "0" && paginatedResult {
+			r.paginatedResult = true
+		}
+
+		r.setIsLastSyncResourceVersionExpired(false) // list was successful
 		initTrace.Step("Objects listed")
 		listMetaInterface, err := meta.ListAccessor(list)
 		if err != nil {
@@ -298,10 +366,15 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
 
 		w, err := r.listerWatcher.Watch(options)
 		if err != nil {
-			switch err {
-			case io.EOF:
+			switch {
+			case isExpiredError(err):
+				// Don't set LastSyncResourceVersionExpired - LIST call with ResourceVersion=RV already
+				// has a semantic that it returns data at least as fresh as provided RV.
+				// So first try to LIST with setting RV to resource version of last observed object.
+				klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.expectedTypeName, err)
+			case err == io.EOF:
 				// watch closed normally
-			case io.ErrUnexpectedEOF:
+			case err == io.ErrUnexpectedEOF:
 				klog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedTypeName, err)
 			default:
 				utilruntime.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.expectedTypeName, err))
@@ -320,8 +393,11 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
 		if err := r.watchHandler(w, &resourceVersion, resyncerrc, stopCh); err != nil {
 			if err != errorStopRequested {
 				switch {
-				case apierrs.IsResourceExpired(err):
-					klog.V(4).Infof("%s: watch of %v ended with: %v", r.name, r.expectedTypeName, err)
+				case isExpiredError(err):
+					// Don't set LastSyncResourceVersionExpired - LIST call with ResourceVersion=RV already
+					// has a semantic that it returns data at least as fresh as provided RV.
+					// So first try to LIST with setting RV to resource version of last observed object.
+					klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.expectedTypeName, err)
 				default:
 					klog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedTypeName, err)
 				}
@@ -361,7 +437,7 @@ loop:
 				break loop
 			}
 			if event.Type == watch.Error {
-				return apierrs.FromObject(event.Object)
+				return apierrors.FromObject(event.Object)
 			}
 			if r.expectedType != nil {
 				if e, a := r.expectedType, reflect.TypeOf(event.Object); e != a {
@@ -432,3 +508,42 @@ func (r *Reflector) setLastSyncResourceVersion(v string) {
 	defer r.lastSyncResourceVersionMutex.Unlock()
 	r.lastSyncResourceVersion = v
 }
+
+// relistResourceVersion determines the resource version the reflector should list or relist from.
+// Returns either the lastSyncResourceVersion so that this reflector will relist with a resource
+// versions no older than has already been observed in relist results or watch events, or, if the last relist resulted
+// in an HTTP 410 (Gone) status code, returns "" so that the relist will use the latest resource version available in
+// etcd via a quorum read.
+func (r *Reflector) relistResourceVersion() string {
+	r.lastSyncResourceVersionMutex.RLock()
+	defer r.lastSyncResourceVersionMutex.RUnlock()
+
+	if r.isLastSyncResourceVersionGone {
+		// Since this reflector makes paginated list requests, and all paginated list requests skip the watch cache
+		// if the lastSyncResourceVersion is expired, we set ResourceVersion="" and list again to re-establish the reflector
+		// to the latest available ResourceVersion, using a consistent read from etcd.
+		return ""
+	}
+	if r.lastSyncResourceVersion == "" {
+		// For performance reasons, initial list performed by reflector uses "0" as resource version to allow it to
+		// be served from the watch cache if it is enabled.
+		return "0"
+	}
+	return r.lastSyncResourceVersion
+}
+
+// setIsLastSyncResourceVersionExpired sets whether the last list or watch request with lastSyncResourceVersion returned an
+// expired error: HTTP 410 (Gone) Status Code.
+func (r *Reflector) setIsLastSyncResourceVersionExpired(isExpired bool) {
+	r.lastSyncResourceVersionMutex.Lock()
+	defer r.lastSyncResourceVersionMutex.Unlock()
+	r.isLastSyncResourceVersionGone = isExpired
+}
+
+func isExpiredError(err error) bool {
+	// In Kubernetes 1.17 and earlier, the api server returns both apierrors.StatusReasonExpired and
+	// apierrors.StatusReasonGone for HTTP 410 (Gone) status code responses. In 1.18 the kube server is more consistent
+	// and always returns apierrors.StatusReasonExpired. For backward compatibility we can only remove the apierrors.IsGone
+	// check when we fully drop support for Kubernetes 1.17 servers from reflectors.
+	return apierrors.IsResourceExpired(err) || apierrors.IsGone(err)
+}
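
A minimal, hypothetical sketch of driving a Reflector end to end, to show how the pieces above compose; the in-memory list/watch functions are illustrative stand-ins for a real API client and are not part of this change:

package main

import (
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/tools/cache"
)

func main() {
	lw := &cache.ListWatch{
		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
			// A real ListFunc would call the API server; options.ResourceVersion
			// is chosen by relistResourceVersion() as described above.
			return &v1.PodList{}, nil
		},
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			return watch.NewFake(), nil
		},
	}

	store := cache.NewStore(cache.MetaNamespaceKeyFunc)

	// The Reflector keeps the store up to date; with this change, restarts of
	// ListAndWatch are paced by the exponential backoff manager rather than a
	// fixed one-second period.
	r := cache.NewReflector(lw, &v1.Pod{}, store, 30*time.Second)

	stopCh := make(chan struct{})
	go r.Run(stopCh)
	time.Sleep(time.Second)
	close(stopCh)
}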
diff --git a/vendor/k8s.io/client-go/tools/cache/shared_informer.go b/vendor/k8s.io/client-go/tools/cache/shared_informer.go
index f59a0852..df8c67dc 100644
--- a/vendor/k8s.io/client-go/tools/cache/shared_informer.go
+++ b/vendor/k8s.io/client-go/tools/cache/shared_informer.go
@@ -21,11 +21,11 @@ import (
 	"sync"
 	"time"
 
+	"k8s.io/apimachinery/pkg/api/meta"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/clock"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/client-go/util/retry"
 	"k8s.io/utils/buffer"
 
 	"k8s.io/klog"
@@ -46,15 +46,6 @@ import (
 // An object state is either "absent" or present with a
 // ResourceVersion and other appropriate content.
 //
-// A SharedInformer gets object states from apiservers using a
-// sequence of LIST and WATCH operations.  Through this sequence the
-// apiservers provide a sequence of "collection states" to the
-// informer, where each collection state defines the state of every
-// object of the collection.  No promise --- beyond what is implied by
-// other remarks here --- is made about how one informer's sequence of
-// collection states relates to a different informer's sequence of
-// collection states.
-//
 // A SharedInformer maintains a local cache, exposed by GetStore() and
 // by GetIndexer() in the case of an indexed informer, of the state of
 // each relevant object.  This cache is eventually consistent with the
@@ -67,10 +58,17 @@ import (
 // To be formally complete, we say that the absent state meets any
 // restriction by label selector or field selector.
 //
+// For a given informer and relevant object ID X, the sequence of
+// states that appears in the informer's cache is a subsequence of the
+// states authoritatively associated with X.  That is, some states
+// might never appear in the cache but ordering among the appearing
+// states is correct.  Note, however, that there is no promise about
+// ordering between states seen for different objects.
+//
 // The local cache starts out empty, and gets populated and updated
 // during `Run()`.
 //
-// As a simple example, if a collection of objects is henceforeth
+// As a simple example, if a collection of objects is henceforth
 // unchanging, a SharedInformer is created that links to that
 // collection, and that SharedInformer is `Run()` then that
 // SharedInformer's cache eventually holds an exact copy of that
@@ -91,6 +89,10 @@ import (
 // a given object, and `SplitMetaNamespaceKey(key)` to split a key
 // into its constituent parts.
 //
+// Every query against the local cache is answered entirely from one
+// snapshot of the cache's state.  Thus, the result of a `List` call
+// will not contain two entries with the same namespace and name.
+//
 // A client is identified here by a ResourceEventHandler.  For every
 // update to the SharedInformer's local cache and for every client
 // added before `Run()`, eventually either the SharedInformer is
@@ -106,7 +108,16 @@ import (
 // and index updates happen before such a prescribed notification.
 // For a given SharedInformer and client, the notifications are
 // delivered sequentially.  For a given SharedInformer, client, and
-// object ID, the notifications are delivered in order.
+// object ID, the notifications are delivered in order.  Because
+// `ObjectMeta.UID` has no role in identifying objects, it is possible
+// that when (1) object O1 with ID (e.g. namespace and name) X and
+// `ObjectMeta.UID` U1 in the SharedInformer's local cache is deleted
+// and later (2) another object O2 with ID X and ObjectMeta.UID U2 is
+// created the informer's clients are not notified of (1) and (2) but
+// rather are notified only of an update from O1 to O2. Clients that
+// need to detect such cases might do so by comparing the `ObjectMeta.UID`
+// field of the old and the new object in the code that handles update
+// notifications (i.e. `OnUpdate` method of ResourceEventHandler).
 //
 // A client must process each notification promptly; a SharedInformer
 // is not engineered to deal well with a large backlog of
@@ -114,11 +125,6 @@ import (
 // to something else, for example through a
 // `client-go/util/workqueue`.
 //
-// Each query to an informer's local cache --- whether a single-object
-// lookup, a list operation, or a use of one of its indices --- is
-// answered entirely from one of the collection states received by
-// that informer.
-//
 // A delete notification exposes the last locally known non-absent
 // state, except that its ResourceVersion is replaced with a
 // ResourceVersion in which the object is actually absent.
@@ -128,14 +134,23 @@ type SharedInformer interface {
 	// between different handlers.
 	AddEventHandler(handler ResourceEventHandler)
 	// AddEventHandlerWithResyncPeriod adds an event handler to the
-	// shared informer using the specified resync period.  The resync
-	// operation consists of delivering to the handler a create
-	// notification for every object in the informer's local cache; it
-	// does not add any interactions with the authoritative storage.
+	// shared informer with the requested resync period; zero means
+	// this handler does not care about resyncs.  The resync operation
+	// consists of delivering to the handler an update notification
+	// for every object in the informer's local cache; it does not add
+	// any interactions with the authoritative storage.  Some
+	// informers do no resyncs at all, not even for handlers added
+	// with a non-zero resyncPeriod.  For an informer that does
+	// resyncs, and for each handler that requests resyncs, that
+	// informer develops a nominal resync period that is no shorter
+	// than the requested period but may be longer.  The actual time
+	// between any two resyncs may be longer than the nominal period
+	// because the implementation takes time to do work and there may
+	// be competing load and scheduling noise.
 	AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration)
 	// GetStore returns the informer's local cache as a Store.
 	GetStore() Store
-	// GetController gives back a synthetic interface that "votes" to start the informer
+	// GetController is deprecated; it does nothing useful
 	GetController() Controller
 	// Run starts and runs the shared informer, returning after it stops.
 	// The informer will be stopped when stopCh is closed.
@@ -159,21 +174,32 @@ type SharedIndexInformer interface {
 }
 
 // NewSharedInformer creates a new instance for the listwatcher.
-func NewSharedInformer(lw ListerWatcher, objType runtime.Object, resyncPeriod time.Duration) SharedInformer {
-	return NewSharedIndexInformer(lw, objType, resyncPeriod, Indexers{})
+func NewSharedInformer(lw ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration) SharedInformer {
+	return NewSharedIndexInformer(lw, exampleObject, defaultEventHandlerResyncPeriod, Indexers{})
 }
 
 // NewSharedIndexInformer creates a new instance for the listwatcher.
-func NewSharedIndexInformer(lw ListerWatcher, objType runtime.Object, defaultEventHandlerResyncPeriod time.Duration, indexers Indexers) SharedIndexInformer {
+// The created informer will not do resyncs if the given
+// defaultEventHandlerResyncPeriod is zero.  Otherwise: for each
+// handler with a non-zero requested resync period, whether added
+// before or after the informer starts, the nominal resync period is
+// the requested resync period rounded up to a multiple of the
+// informer's resync checking period.  Such an informer's resync
+// checking period is established when the informer starts running,
+// and is the maximum of (a) the minimum of the resync periods
+// requested before the informer starts and the
+// defaultEventHandlerResyncPeriod given here and (b) the constant
+// `minimumResyncPeriod` defined in this file.
+func NewSharedIndexInformer(lw ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration, indexers Indexers) SharedIndexInformer {
 	realClock := &clock.RealClock{}
 	sharedIndexInformer := &sharedIndexInformer{
 		processor:                       &sharedProcessor{clock: realClock},
 		indexer:                         NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers),
 		listerWatcher:                   lw,
-		objectType:                      objType,
+		objectType:                      exampleObject,
 		resyncCheckPeriod:               defaultEventHandlerResyncPeriod,
 		defaultEventHandlerResyncPeriod: defaultEventHandlerResyncPeriod,
-		cacheMutationDetector:           NewCacheMutationDetector(fmt.Sprintf("%T", objType)),
+		cacheMutationDetector:           NewCacheMutationDetector(fmt.Sprintf("%T", exampleObject)),
 		clock:                           realClock,
 	}
 	return sharedIndexInformer
@@ -228,6 +254,19 @@ func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool
 	return true
 }
 
+// `*sharedIndexInformer` implements SharedIndexInformer and has three
+// main components.  One is an indexed local cache, `indexer Indexer`.
+// The second main component is a Controller that pulls
+// objects/notifications using the ListerWatcher and pushes them into
+// a DeltaFIFO --- whose knownObjects is the informer's local cache
+// --- while concurrently Popping Deltas values from that fifo and
+// processing them with `sharedIndexInformer::HandleDeltas`.  Each
+// invocation of HandleDeltas, which is done with the fifo's lock
+// held, processes each Delta in turn.  For each Delta this both
+// updates the local cache and stuffs the relevant notification into
+// the sharedProcessor.  The third main component is that
+// sharedProcessor, which is responsible for relaying those
+// notifications to each of the informer's clients.
 type sharedIndexInformer struct {
 	indexer    Indexer
 	controller Controller
@@ -235,9 +274,13 @@ type sharedIndexInformer struct {
 	processor             *sharedProcessor
 	cacheMutationDetector MutationDetector
 
-	// This block is tracked to handle late initialization of the controller
 	listerWatcher ListerWatcher
-	objectType    runtime.Object
+
+	// objectType is an example object of the type this informer is
+	// expected to handle.  Only the type needs to be right, except
+	// that when that is `unstructured.Unstructured` the object's
+	// `"apiVersion"` and `"kind"` must also be right.
+	objectType runtime.Object
 
 	// resyncCheckPeriod is how often we want the reflector's resync timer to fire so it can call
 	// shouldResync to check if any of our listeners need a resync.
@@ -293,7 +336,10 @@ type deleteNotification struct {
 func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) {
 	defer utilruntime.HandleCrash()
 
-	fifo := NewDeltaFIFO(MetaNamespaceKeyFunc, s.indexer)
+	fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{
+		KnownObjects:          s.indexer,
+		EmitDeltaTypeReplaced: true,
+	})
 
 	cfg := &Config{
 		Queue:            fifo,
@@ -452,19 +498,33 @@ func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error {
 	// from oldest to newest
 	for _, d := range obj.(Deltas) {
 		switch d.Type {
-		case Sync, Added, Updated:
-			isSync := d.Type == Sync
+		case Sync, Replaced, Added, Updated:
 			s.cacheMutationDetector.AddObject(d.Object)
 			if old, exists, err := s.indexer.Get(d.Object); err == nil && exists {
 				if err := s.indexer.Update(d.Object); err != nil {
 					return err
 				}
+
+				isSync := false
+				switch {
+				case d.Type == Sync:
+					// Sync events are only propagated to listeners that requested resync
+					isSync = true
+				case d.Type == Replaced:
+					if accessor, err := meta.Accessor(d.Object); err == nil {
+						if oldAccessor, err := meta.Accessor(old); err == nil {
+							// Replaced events that didn't change resourceVersion are treated as resync events
+							// and only propagated to listeners that requested resync
+							isSync = accessor.GetResourceVersion() == oldAccessor.GetResourceVersion()
+						}
+					}
+				}
 				s.processor.distribute(updateNotification{oldObj: old, newObj: d.Object}, isSync)
 			} else {
 				if err := s.indexer.Add(d.Object); err != nil {
 					return err
 				}
-				s.processor.distribute(addNotification{newObj: d.Object}, isSync)
+				s.processor.distribute(addNotification{newObj: d.Object}, false)
 			}
 		case Deleted:
 			if err := s.indexer.Delete(d.Object); err != nil {
@@ -476,6 +536,12 @@ func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error {
 	return nil
 }
 
+// sharedProcessor has a collection of processorListener and can
+// distribute a notification object to its listeners.  There are two
+// kinds of distribute operations.  The sync distributions go to a
+// subset of the listeners that (a) is recomputed in the occasional
+// calls to shouldResync and (b) every listener is initially put in.
+// The non-sync distributions go to every listener.
 type sharedProcessor struct {
 	listenersStarted bool
 	listenersLock    sync.RWMutex
@@ -567,6 +633,17 @@ func (p *sharedProcessor) resyncCheckPeriodChanged(resyncCheckPeriod time.Durati
 	}
 }
 
+// processorListener relays notifications from a sharedProcessor to
+// one ResourceEventHandler --- using two goroutines, two unbuffered
+// channels, and an unbounded ring buffer.  The `add(notification)`
+// function sends the given notification to `addCh`.  One goroutine
+// runs `pop()`, which pumps notifications from `addCh` to `nextCh`
+// using storage in the ring buffer while `nextCh` is not keeping up.
+// Another goroutine runs `run()`, which receives notifications from
+// `nextCh` and synchronously invokes the appropriate handler method.
+//
+// processorListener also keeps track of the adjusted requested resync
+// period of the listener.
 type processorListener struct {
 	nextCh chan interface{}
 	addCh  chan interface{}
@@ -580,11 +657,22 @@ type processorListener struct {
 	// we should try to do something better.
 	pendingNotifications buffer.RingGrowing
 
-	// requestedResyncPeriod is how frequently the listener wants a full resync from the shared informer
+	// requestedResyncPeriod is how frequently the listener wants a
+	// full resync from the shared informer, but modified by two
+	// adjustments.  One is imposing a lower bound,
+	// `minimumResyncPeriod`.  The other is another lower bound, the
+	// sharedProcessor's `resyncCheckPeriod`, that is imposed (a) only
+	// in AddEventHandlerWithResyncPeriod invocations made after the
+	// sharedProcessor starts and (b) only if the informer does
+	// resyncs at all.
 	requestedResyncPeriod time.Duration
-	// resyncPeriod is how frequently the listener wants a full resync from the shared informer. This
-	// value may differ from requestedResyncPeriod if the shared informer adjusts it to align with the
-	// informer's overall resync check period.
+	// resyncPeriod is the threshold that will be used in the logic
+	// for this listener.  This value differs from
+	// requestedResyncPeriod only when the sharedIndexInformer does
+	// not do resyncs, in which case the value here is zero.  The
+	// actual time between resyncs depends on when the
+	// sharedProcessor's `shouldResync` function is invoked and when
+	// the sharedIndexInformer processes `Sync` type Delta objects.
 	resyncPeriod time.Duration
 	// nextResync is the earliest time the listener should get a full resync
 	nextResync time.Time
@@ -648,29 +736,21 @@ func (p *processorListener) run() {
 	// delivering again.
 	stopCh := make(chan struct{})
 	wait.Until(func() {
-		// this gives us a few quick retries before a long pause and then a few more quick retries
-		err := wait.ExponentialBackoff(retry.DefaultRetry, func() (bool, error) {
-			for next := range p.nextCh {
-				switch notification := next.(type) {
-				case updateNotification:
-					p.handler.OnUpdate(notification.oldObj, notification.newObj)
-				case addNotification:
-					p.handler.OnAdd(notification.newObj)
-				case deleteNotification:
-					p.handler.OnDelete(notification.oldObj)
-				default:
-					utilruntime.HandleError(fmt.Errorf("unrecognized notification: %T", next))
-				}
+		for next := range p.nextCh {
+			switch notification := next.(type) {
+			case updateNotification:
+				p.handler.OnUpdate(notification.oldObj, notification.newObj)
+			case addNotification:
+				p.handler.OnAdd(notification.newObj)
+			case deleteNotification:
+				p.handler.OnDelete(notification.oldObj)
+			default:
+				utilruntime.HandleError(fmt.Errorf("unrecognized notification: %T", next))
 			}
-			// the only way to get here is if the p.nextCh is empty and closed
-			return true, nil
-		})
-
-		// the only way to get here is if the p.nextCh is empty and closed
-		if err == nil {
-			close(stopCh)
 		}
-	}, 1*time.Minute, stopCh)
+		// the only way to get here is if the p.nextCh is empty and closed
+		close(stopCh)
+	}, 1*time.Second, stopCh)
 }
 
 // shouldResync determines if the listener needs a resync. If the listener's resyncPeriod is 0,
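
A minimal, hypothetical sketch of a shared index informer with two handlers whose resync behavior differs, matching the semantics documented above; the in-cluster config, namespace, and handler bodies are illustrative assumptions, not part of this change:

package main

import (
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
)

func main() {
	config, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	clientset := kubernetes.NewForConfigOrDie(config)
	lw := cache.NewListWatchFromClient(clientset.CoreV1().RESTClient(), "pods", "default", fields.Everything())

	// A zero defaultEventHandlerResyncPeriod here would disable resyncs entirely.
	informer := cache.NewSharedIndexInformer(lw, &v1.Pod{}, 10*time.Minute, cache.Indexers{
		cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
	})

	// Uses the informer's default resync period (10 minutes in this sketch).
	informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) { fmt.Println("added:", obj.(*v1.Pod).Name) },
	})

	// Requests a shorter resync; the informer derives a nominal period that is at
	// least this long, and resyncs arrive as update notifications with old == new.
	informer.AddEventHandlerWithResyncPeriod(cache.ResourceEventHandlerFuncs{
		UpdateFunc: func(oldObj, newObj interface{}) { fmt.Println("update or resync:", newObj.(*v1.Pod).Name) },
	}, time.Minute)

	stopCh := make(chan struct{})
	informer.Run(stopCh) // blocks until stopCh is closed
}

Because resyncs are delivered as update notifications, a handler that only needs resync-driven reconciliation can implement just UpdateFunc.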
diff --git a/vendor/k8s.io/client-go/tools/cache/store.go b/vendor/k8s.io/client-go/tools/cache/store.go
index fc844efe..886e95d2 100644
--- a/vendor/k8s.io/client-go/tools/cache/store.go
+++ b/vendor/k8s.io/client-go/tools/cache/store.go
@@ -23,27 +23,50 @@ import (
 	"k8s.io/apimachinery/pkg/api/meta"
 )
 
-// Store is a generic object storage interface. Reflector knows how to watch a server
-// and update a store. A generic store is provided, which allows Reflector to be used
-// as a local caching system, and an LRU store, which allows Reflector to work like a
-// queue of items yet to be processed.
+// Store is a generic object storage and processing interface.  A
+// Store holds a map from string keys to accumulators, and has
+// operations to add, update, and delete a given object to/from the
+// accumulator currently associated with a given key.  A Store also
+// knows how to extract the key from a given object, so many operations
+// are given only the object.
 //
-// Store makes no assumptions about stored object identity; it is the responsibility
-// of a Store implementation to provide a mechanism to correctly key objects and to
-// define the contract for obtaining objects by some arbitrary key type.
+// In the simplest Store implementations each accumulator is simply
+// the last given object, or empty after Delete, and thus the Store's
+// behavior is simple storage.
+//
+// Reflector knows how to watch a server and update a Store.  This
+// package provides a variety of implementations of Store.
 type Store interface {
+
+	// Add adds the given object to the accumulator associated with the given object's key
 	Add(obj interface{}) error
+
+	// Update updates the given object in the accumulator associated with the given object's key
 	Update(obj interface{}) error
+
+	// Delete deletes the given object from the accumulator associated with the given object's key
 	Delete(obj interface{}) error
+
+	// List returns a list of all the currently non-empty accumulators
 	List() []interface{}
+
+	// ListKeys returns a list of all the keys currently associated with non-empty accumulators
 	ListKeys() []string
+
+	// Get returns the accumulator associated with the given object's key
 	Get(obj interface{}) (item interface{}, exists bool, err error)
+
+	// GetByKey returns the accumulator associated with the given key
 	GetByKey(key string) (item interface{}, exists bool, err error)
 
 	// Replace will delete the contents of the store, using instead the
 	// given list. Store takes ownership of the list, you should not reference
 	// it after calling this function.
 	Replace([]interface{}, string) error
+
+	// Resync is meaningless in the terms appearing here but has
+	// meaning in some implementations that have non-trivial
+	// additional behavior (e.g., DeltaFIFO).
 	Resync() error
 }
 
@@ -106,9 +129,8 @@ func SplitMetaNamespaceKey(key string) (namespace, name string, err error) {
 	return "", "", fmt.Errorf("unexpected key format: %q", key)
 }
 
-// cache responsibilities are limited to:
-//	1. Computing keys for objects via keyFunc
-//  2. Invoking methods of a ThreadSafeStorage interface
+// `*cache` implements Indexer in terms of a ThreadSafeStore and an
+// associated KeyFunc.
 type cache struct {
 	// cacheStorage bears the burden of thread safety for the cache
 	cacheStorage ThreadSafeStore
@@ -222,9 +244,9 @@ func (c *cache) Replace(list []interface{}, resourceVersion string) error {
 	return nil
 }
 
-// Resync touches all items in the store to force processing
+// Resync is a no-op for this cache implementation.
 func (c *cache) Resync() error {
-	return c.cacheStorage.Resync()
+	return nil
 }
 
 // NewStore returns a Store implemented simply with a map and a lock.
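
A brief, hypothetical sketch of the plain map-backed Store described above, where each accumulator is just the last object provided for a key; the pod and key are illustrative:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)

	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "example"}}
	_ = store.Add(pod)

	// MetaNamespaceKeyFunc derives the key "<namespace>/<name>" from the object.
	obj, exists, _ := store.GetByKey("default/example")
	fmt.Println(exists, obj.(*v1.Pod).Name)

	// Resync on this implementation is now a no-op, per the change above.
	_ = store.Resync()
}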
diff --git a/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go b/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go
index e7232514..56251179 100644
--- a/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go
+++ b/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go
@@ -23,7 +23,11 @@ import (
 	"k8s.io/apimachinery/pkg/util/sets"
 )
 
-// ThreadSafeStore is an interface that allows concurrent access to a storage backend.
+// ThreadSafeStore is an interface that allows concurrent indexed
+// access to a storage backend.  It is like Indexer but does not
+// (necessarily) know how to extract the Store key from a given
+// object.
+//
 // TL;DR caveats: you must not modify anything returned by Get or List as it will break
 // the indexing feature in addition to not being thread safe.
 //
@@ -51,6 +55,7 @@ type ThreadSafeStore interface {
 	// AddIndexers adds more indexers to this store.  If you call this after you already have data
 	// in the store, the results are undefined.
 	AddIndexers(newIndexers Indexers) error
+	// Resync is a no-op and is deprecated
 	Resync() error
 }
 
@@ -131,8 +136,8 @@ func (c *threadSafeMap) Replace(items map[string]interface{}, resourceVersion st
 	}
 }
 
-// Index returns a list of items that match on the index function
-// Index is thread-safe so long as you treat all items as immutable
+// Index returns a list of items that match the given object on the index function.
+// Index is thread-safe so long as you treat all items as immutable.
 func (c *threadSafeMap) Index(indexName string, obj interface{}) ([]interface{}, error) {
 	c.lock.RLock()
 	defer c.lock.RUnlock()
@@ -142,37 +147,37 @@ func (c *threadSafeMap) Index(indexName string, obj interface{}) ([]interface{},
 		return nil, fmt.Errorf("Index with name %s does not exist", indexName)
 	}
 
-	indexKeys, err := indexFunc(obj)
+	indexedValues, err := indexFunc(obj)
 	if err != nil {
 		return nil, err
 	}
 	index := c.indices[indexName]
 
-	var returnKeySet sets.String
-	if len(indexKeys) == 1 {
+	var storeKeySet sets.String
+	if len(indexedValues) == 1 {
 		// In majority of cases, there is exactly one value matching.
 		// Optimize the most common path - deduping is not needed here.
-		returnKeySet = index[indexKeys[0]]
+		storeKeySet = index[indexedValues[0]]
 	} else {
 		// Need to de-dupe the return list.
 		// Since multiple keys are allowed, this can happen.
-		returnKeySet = sets.String{}
-		for _, indexKey := range indexKeys {
-			for key := range index[indexKey] {
-				returnKeySet.Insert(key)
+		storeKeySet = sets.String{}
+		for _, indexedValue := range indexedValues {
+			for key := range index[indexedValue] {
+				storeKeySet.Insert(key)
 			}
 		}
 	}
 
-	list := make([]interface{}, 0, returnKeySet.Len())
-	for absoluteKey := range returnKeySet {
-		list = append(list, c.items[absoluteKey])
+	list := make([]interface{}, 0, storeKeySet.Len())
+	for storeKey := range storeKeySet {
+		list = append(list, c.items[storeKey])
 	}
 	return list, nil
 }
 
-// ByIndex returns a list of items that match an exact value on the index function
-func (c *threadSafeMap) ByIndex(indexName, indexKey string) ([]interface{}, error) {
+// ByIndex returns a list of the items whose indexed values in the given index include the given indexed value
+func (c *threadSafeMap) ByIndex(indexName, indexedValue string) ([]interface{}, error) {
 	c.lock.RLock()
 	defer c.lock.RUnlock()
 
@@ -183,7 +188,7 @@ func (c *threadSafeMap) ByIndex(indexName, indexKey string) ([]interface{}, erro
 
 	index := c.indices[indexName]
 
-	set := index[indexKey]
+	set := index[indexedValue]
 	list := make([]interface{}, 0, set.Len())
 	for key := range set {
 		list = append(list, c.items[key])
@@ -192,9 +197,9 @@ func (c *threadSafeMap) ByIndex(indexName, indexKey string) ([]interface{}, erro
 	return list, nil
 }
 
-// IndexKeys returns a list of keys that match on the index function.
+// IndexKeys returns a list of the Store keys of the objects whose indexed values in the given index include the given indexed value.
 // IndexKeys is thread-safe so long as you treat all items as immutable.
-func (c *threadSafeMap) IndexKeys(indexName, indexKey string) ([]string, error) {
+func (c *threadSafeMap) IndexKeys(indexName, indexedValue string) ([]string, error) {
 	c.lock.RLock()
 	defer c.lock.RUnlock()
 
@@ -205,7 +210,7 @@ func (c *threadSafeMap) IndexKeys(indexName, indexKey string) ([]string, error)
 
 	index := c.indices[indexName]
 
-	set := index[indexKey]
+	set := index[indexedValue]
 	return set.List(), nil
 }
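
The renaming in this hunk separates the two kinds of keys in play: indexed values, which an index function derives from an object, and Store keys, under which the items themselves are held. Below is a minimal, self-contained sketch of that relationship; the pod type and the namespace index function are illustrative assumptions, not the vendored types.

    package main

    import "fmt"

    type pod struct{ Namespace, Name string }

    // indexFunc plays the role of an IndexFunc: object -> indexed values.
    func indexFunc(p pod) []string { return []string{p.Namespace} }

    func main() {
    	// items: Store key -> object (what threadSafeMap.items holds).
    	items := map[string]pod{
    		"default/a":     {Namespace: "default", Name: "a"},
    		"kube-system/b": {Namespace: "kube-system", Name: "b"},
    	}
    	// index: indexed value -> set of Store keys (one Index within Indices).
    	index := map[string]map[string]struct{}{}
    	for storeKey, obj := range items {
    		for _, v := range indexFunc(obj) {
    			if index[v] == nil {
    				index[v] = map[string]struct{}{}
    			}
    			index[v][storeKey] = struct{}{}
    		}
    	}
    	// ByIndex("namespace", "default"): indexed value -> Store keys -> items.
    	for storeKey := range index["default"] {
    		fmt.Println(storeKey, items[storeKey])
    	}
    }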
 
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/types.go b/vendor/k8s.io/client-go/tools/clientcmd/api/types.go
index 1f1209f8..44317dd0 100644
--- a/vendor/k8s.io/client-go/tools/clientcmd/api/types.go
+++ b/vendor/k8s.io/client-go/tools/clientcmd/api/types.go
@@ -70,6 +70,9 @@ type Cluster struct {
 	LocationOfOrigin string
 	// Server is the address of the kubernetes cluster (https://hostname:port).
 	Server string `json:"server"`
+	// TLSServerName is used to check the server certificate. If TLSServerName is empty, the hostname used to contact the server is used.
+	// +optional
+	TLSServerName string `json:"tls-server-name,omitempty"`
 	// InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure.
 	// +optional
 	InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"`
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go
index 2159ffc7..8ccacd3f 100644
--- a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go
+++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go
@@ -63,6 +63,9 @@ type Preferences struct {
 type Cluster struct {
 	// Server is the address of the kubernetes cluster (https://hostname:port).
 	Server string `json:"server"`
+	// TLSServerName is used to check the server certificate. If TLSServerName is empty, the hostname used to contact the server is used.
+	// +optional
+	TLSServerName string `json:"tls-server-name,omitempty"`
 	// InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure.
 	// +optional
 	InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"`
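
For orientation, a hedged sketch of setting the new field on a Cluster; the server URL and SNI name are illustrative. The client_config.go and conversion hunks later in this patch carry the value through to the rest client config's ServerName.

    package example

    import clientcmdapi "k8s.io/client-go/tools/clientcmd/api"

    // exampleCluster shows the new field in context. TLSServerName overrides the
    // name used for certificate validation (SNI) without changing the address dialed.
    func exampleCluster() clientcmdapi.Cluster {
    	return clientcmdapi.Cluster{
    		Server:        "https://10.0.0.1:6443",
    		TLSServerName: "kubernetes.default.svc",
    	}
    }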
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go
index 31e00ea6..8f3631e1 100644
--- a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go
+++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go
@@ -233,6 +233,7 @@ func Convert_api_AuthProviderConfig_To_v1_AuthProviderConfig(in *api.AuthProvide
 
 func autoConvert_v1_Cluster_To_api_Cluster(in *Cluster, out *api.Cluster, s conversion.Scope) error {
 	out.Server = in.Server
+	out.TLSServerName = in.TLSServerName
 	out.InsecureSkipTLSVerify = in.InsecureSkipTLSVerify
 	out.CertificateAuthority = in.CertificateAuthority
 	out.CertificateAuthorityData = *(*[]byte)(unsafe.Pointer(&in.CertificateAuthorityData))
@@ -250,6 +251,7 @@ func Convert_v1_Cluster_To_api_Cluster(in *Cluster, out *api.Cluster, s conversi
 func autoConvert_api_Cluster_To_v1_Cluster(in *api.Cluster, out *Cluster, s conversion.Scope) error {
 	// INFO: in.LocationOfOrigin opted out of conversion generation
 	out.Server = in.Server
+	out.TLSServerName = in.TLSServerName
 	out.InsecureSkipTLSVerify = in.InsecureSkipTLSVerify
 	out.CertificateAuthority = in.CertificateAuthority
 	out.CertificateAuthorityData = *(*[]byte)(unsafe.Pointer(&in.CertificateAuthorityData))
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/client_config.go b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go
index 44115130..5096f51d 100644
--- a/vendor/k8s.io/client-go/tools/clientcmd/client_config.go
+++ b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go
@@ -35,7 +35,7 @@ import (
 var (
 	// ClusterDefaults has the same behavior as the old EnvVar and DefaultCluster fields
 	// DEPRECATED will be replaced
-	ClusterDefaults = clientcmdapi.Cluster{Server: getDefaultServer()}
+	ClusterDefaults = clientcmdapi.Cluster{Server: os.Getenv("KUBERNETES_MASTER")}
 	// DefaultClientConfig represents the legacy behavior of this package for defaulting
 	// DEPRECATED will be replaced
 	DefaultClientConfig = DirectClientConfig{*clientcmdapi.NewConfig(), "", &ConfigOverrides{
@@ -43,15 +43,6 @@ var (
 	}, nil, NewDefaultClientConfigLoadingRules(), promptedCredentials{}}
 )
 
-// getDefaultServer returns a default setting for DefaultClientConfig
-// DEPRECATED
-func getDefaultServer() string {
-	if server := os.Getenv("KUBERNETES_MASTER"); len(server) > 0 {
-		return server
-	}
-	return "http://localhost:8080"
-}
-
 // ClientConfig is used to make it easy to get an api server client
 type ClientConfig interface {
 	// RawConfig returns the merged result of all overrides
@@ -210,6 +201,7 @@ func getServerIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo,
 	configClientConfig.CAFile = configClusterInfo.CertificateAuthority
 	configClientConfig.CAData = configClusterInfo.CertificateAuthorityData
 	configClientConfig.Insecure = configClusterInfo.InsecureSkipTLSVerify
+	configClientConfig.ServerName = configClusterInfo.TLSServerName
 	mergo.MergeWithOverwrite(mergedConfig, configClientConfig)
 
 	return mergedConfig, nil
@@ -460,6 +452,14 @@ func (config *DirectClientConfig) getCluster() (clientcmdapi.Cluster, error) {
 		mergedClusterInfo.CertificateAuthorityData = config.overrides.ClusterInfo.CertificateAuthorityData
 	}
 
+	// If --tls-server-name has been set in overrides, use that value.
+	// If --server has been set in overrides, also take --tls-server-name from the CLI, even when it is empty. This means
+	// that setting --server effectively clears any tls-server-name inherited from the KUBECONFIG, which is usually the
+	// desired behavior.
+	if config.overrides.ClusterInfo.TLSServerName != "" || config.overrides.ClusterInfo.Server != "" {
+		mergedClusterInfo.TLSServerName = config.overrides.ClusterInfo.TLSServerName
+	}
+
 	return *mergedClusterInfo, nil
 }
 
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/overrides.go b/vendor/k8s.io/client-go/tools/clientcmd/overrides.go
index bfca0328..95cba0fa 100644
--- a/vendor/k8s.io/client-go/tools/clientcmd/overrides.go
+++ b/vendor/k8s.io/client-go/tools/clientcmd/overrides.go
@@ -71,6 +71,7 @@ type ClusterOverrideFlags struct {
 	APIVersion            FlagInfo
 	CertificateAuthority  FlagInfo
 	InsecureSkipTLSVerify FlagInfo
+	TLSServerName         FlagInfo
 }
 
 // FlagInfo contains information about how to register a flag.  This struct is useful if you want to provide a way for an extender to
@@ -145,6 +146,7 @@ const (
 	FlagContext          = "context"
 	FlagNamespace        = "namespace"
 	FlagAPIServer        = "server"
+	FlagTLSServerName    = "tls-server-name"
 	FlagInsecure         = "insecure-skip-tls-verify"
 	FlagCertFile         = "client-certificate"
 	FlagKeyFile          = "client-key"
@@ -189,6 +191,7 @@ func RecommendedClusterOverrideFlags(prefix string) ClusterOverrideFlags {
 		APIServer:             FlagInfo{prefix + FlagAPIServer, "", "", "The address and port of the Kubernetes API server"},
 		CertificateAuthority:  FlagInfo{prefix + FlagCAFile, "", "", "Path to a cert file for the certificate authority"},
 		InsecureSkipTLSVerify: FlagInfo{prefix + FlagInsecure, "", "false", "If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure"},
+		TLSServerName:         FlagInfo{prefix + FlagTLSServerName, "", "", "If provided, this name will be used to validate the server certificate. If not provided, the hostname used to contact the server is used."},
 	}
 }
 
@@ -226,6 +229,7 @@ func BindClusterFlags(clusterInfo *clientcmdapi.Cluster, flags *pflag.FlagSet, f
 	flagNames.APIServer.BindStringFlag(flags, &clusterInfo.Server)
 	flagNames.CertificateAuthority.BindStringFlag(flags, &clusterInfo.CertificateAuthority)
 	flagNames.InsecureSkipTLSVerify.BindBoolFlag(flags, &clusterInfo.InsecureSkipTLSVerify)
+	flagNames.TLSServerName.BindStringFlag(flags, &clusterInfo.TLSServerName)
 }
 
 // BindFlags is a convenience method to bind the specified flags to their associated variables
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/validation.go b/vendor/k8s.io/client-go/tools/clientcmd/validation.go
index 2f927072..afe6f80b 100644
--- a/vendor/k8s.io/client-go/tools/clientcmd/validation.go
+++ b/vendor/k8s.io/client-go/tools/clientcmd/validation.go
@@ -30,7 +30,7 @@ import (
 
 var (
 	ErrNoContext   = errors.New("no context chosen")
-	ErrEmptyConfig = errors.New("no configuration has been provided")
+	ErrEmptyConfig = errors.New("no configuration has been provided, try setting KUBERNETES_MASTER environment variable")
 	// message is for consistency with old behavior
 	ErrEmptyCluster = errors.New("cluster has no server defined")
 )
@@ -86,11 +86,41 @@ func (e errConfigurationInvalid) Error() string {
 	return fmt.Sprintf("invalid configuration: %v", utilerrors.NewAggregate(e).Error())
 }
 
-// Errors implements the AggregateError interface
+// Errors implements the utilerrors.Aggregate interface
 func (e errConfigurationInvalid) Errors() []error {
 	return e
 }
 
+// Is implements the utilerrors.Aggregate interface
+func (e errConfigurationInvalid) Is(target error) bool {
+	return e.visit(func(err error) bool {
+		return errors.Is(err, target)
+	})
+}
+
+func (e errConfigurationInvalid) visit(f func(err error) bool) bool {
+	for _, err := range e {
+		switch err := err.(type) {
+		case errConfigurationInvalid:
+			if match := err.visit(f); match {
+				return match
+			}
+		case utilerrors.Aggregate:
+			for _, nestedErr := range err.Errors() {
+				if match := f(nestedErr); match {
+					return match
+				}
+			}
+		default:
+			if match := f(err); match {
+				return match
+			}
+		}
+	}
+
+	return false
+}
+
 // IsConfigurationInvalid returns true if the provided error indicates the configuration is invalid.
 func IsConfigurationInvalid(err error) bool {
 	switch err.(type) {
diff --git a/vendor/k8s.io/client-go/tools/metrics/metrics.go b/vendor/k8s.io/client-go/tools/metrics/metrics.go
index a01306c6..5194026b 100644
--- a/vendor/k8s.io/client-go/tools/metrics/metrics.go
+++ b/vendor/k8s.io/client-go/tools/metrics/metrics.go
@@ -26,6 +26,16 @@ import (
 
 var registerMetrics sync.Once
 
+// DurationMetric is a measurement of some amount of time.
+type DurationMetric interface {
+	Observe(duration time.Duration)
+}
+
+// ExpiryMetric sets some time of expiry. If nil, assume not relevant.
+type ExpiryMetric interface {
+	Set(expiry *time.Time)
+}
+
 // LatencyMetric observes client latency partitioned by verb and url.
 type LatencyMetric interface {
 	Observe(verb string, u url.URL, latency time.Duration)
@@ -37,21 +47,57 @@ type ResultMetric interface {
 }
 
 var (
+	// ClientCertExpiry is the expiry time of a client certificate
+	ClientCertExpiry ExpiryMetric = noopExpiry{}
+	// ClientCertRotationAge is the age of a certificate that has just been rotated.
+	ClientCertRotationAge DurationMetric = noopDuration{}
 	// RequestLatency is the latency metric that rest clients will update.
 	RequestLatency LatencyMetric = noopLatency{}
+	// RateLimiterLatency is the client side rate limiter latency metric.
+	RateLimiterLatency LatencyMetric = noopLatency{}
 	// RequestResult is the result metric that rest clients will update.
 	RequestResult ResultMetric = noopResult{}
 )
 
+// RegisterOpts contains all the metrics to register. Metrics may be nil.
+type RegisterOpts struct {
+	ClientCertExpiry      ExpiryMetric
+	ClientCertRotationAge DurationMetric
+	RequestLatency        LatencyMetric
+	RateLimiterLatency    LatencyMetric
+	RequestResult         ResultMetric
+}
+
 // Register registers metrics for the rest client to use. This can
 // only be called once.
-func Register(lm LatencyMetric, rm ResultMetric) {
+func Register(opts RegisterOpts) {
 	registerMetrics.Do(func() {
-		RequestLatency = lm
-		RequestResult = rm
+		if opts.ClientCertExpiry != nil {
+			ClientCertExpiry = opts.ClientCertExpiry
+		}
+		if opts.ClientCertRotationAge != nil {
+			ClientCertRotationAge = opts.ClientCertRotationAge
+		}
+		if opts.RequestLatency != nil {
+			RequestLatency = opts.RequestLatency
+		}
+		if opts.RateLimiterLatency != nil {
+			RateLimiterLatency = opts.RateLimiterLatency
+		}
+		if opts.RequestResult != nil {
+			RequestResult = opts.RequestResult
+		}
 	})
 }
 
+type noopDuration struct{}
+
+func (noopDuration) Observe(time.Duration) {}
+
+type noopExpiry struct{}
+
+func (noopExpiry) Set(*time.Time) {}
+
 type noopLatency struct{}
 
 func (noopLatency) Observe(string, url.URL, time.Duration) {}
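
Because Register now takes a struct, callers supply only the metrics they care about and leave the rest nil, which keeps the no-op defaults. A hedged sketch of wiring a custom latency metric; the simpleLatency recorder is an illustrative assumption, not part of client-go.

    package main

    import (
    	"net/url"
    	"time"

    	"k8s.io/client-go/tools/metrics"
    )

    // simpleLatency is an illustrative LatencyMetric; a real implementation would
    // record the observation in a metrics backend.
    type simpleLatency struct{}

    func (simpleLatency) Observe(verb string, u url.URL, latency time.Duration) {}

    func main() {
    	// Only RequestLatency is supplied; the nil fields keep their no-op defaults.
    	metrics.Register(metrics.RegisterOpts{
    		RequestLatency: simpleLatency{},
    	})
    }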
diff --git a/vendor/k8s.io/client-go/tools/pager/pager.go b/vendor/k8s.io/client-go/tools/pager/pager.go
index 307808be..f6c6a012 100644
--- a/vendor/k8s.io/client-go/tools/pager/pager.go
+++ b/vendor/k8s.io/client-go/tools/pager/pager.go
@@ -73,16 +73,18 @@ func New(fn ListPageFunc) *ListPager {
 // List returns a single list object, but attempts to retrieve smaller chunks from the
 // server to reduce the impact on the server. If the chunk attempt fails, it will load
 // the full list instead. The Limit field on options, if unset, will default to the page size.
-func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runtime.Object, error) {
+func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runtime.Object, bool, error) {
 	if options.Limit == 0 {
 		options.Limit = p.PageSize
 	}
 	requestedResourceVersion := options.ResourceVersion
 	var list *metainternalversion.List
+	paginatedResult := false
+
 	for {
 		select {
 		case <-ctx.Done():
-			return nil, ctx.Err()
+			return nil, paginatedResult, ctx.Err()
 		default:
 		}
 
@@ -93,23 +95,24 @@ func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runti
 			// failing when the resource version established by the first page request falls out of the compaction
 			// during the subsequent list requests).
 			if !errors.IsResourceExpired(err) || !p.FullListIfExpired || options.Continue == "" {
-				return nil, err
+				return nil, paginatedResult, err
 			}
 			// the list expired while we were processing, fall back to a full list at
 			// the requested ResourceVersion.
 			options.Limit = 0
 			options.Continue = ""
 			options.ResourceVersion = requestedResourceVersion
-			return p.PageFn(ctx, options)
+			result, err := p.PageFn(ctx, options)
+			return result, paginatedResult, err
 		}
 		m, err := meta.ListAccessor(obj)
 		if err != nil {
-			return nil, fmt.Errorf("returned object must be a list: %v", err)
+			return nil, paginatedResult, fmt.Errorf("returned object must be a list: %v", err)
 		}
 
 		// exit early and return the object we got if we haven't processed any pages
 		if len(m.GetContinue()) == 0 && list == nil {
-			return obj, nil
+			return obj, paginatedResult, nil
 		}
 
 		// initialize the list and fill its contents
@@ -122,12 +125,12 @@ func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runti
 			list.Items = append(list.Items, obj)
 			return nil
 		}); err != nil {
-			return nil, err
+			return nil, paginatedResult, err
 		}
 
 		// if we have no more items, return the list
 		if len(m.GetContinue()) == 0 {
-			return list, nil
+			return list, paginatedResult, nil
 		}
 
 		// set the next loop up
@@ -136,6 +139,8 @@ func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runti
 		// `specifying resource version is not allowed when using continue` error.
 		// See https://github.com/kubernetes/kubernetes/issues/85221#issuecomment-553748143.
 		options.ResourceVersion = ""
+		// At this point, result is already paginated.
+		paginatedResult = true
 	}
 }
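
Callers of ListPager.List must now handle the extra boolean, which reports whether the result was assembled from more than one page. A hedged usage sketch; the ListPageFunc passed in is assumed to wrap a real client's List call, which is not shown here.

    package example

    import (
    	"context"
    	"fmt"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/apimachinery/pkg/runtime"
    	"k8s.io/client-go/tools/pager"
    )

    // listAll pages through a collection using the new three-value signature.
    func listAll(ctx context.Context, listPage pager.ListPageFunc) (runtime.Object, error) {
    	p := pager.New(listPage)
    	obj, paginated, err := p.List(ctx, metav1.ListOptions{})
    	if err != nil {
    		return nil, err
    	}
    	if !paginated {
    		// Either everything fit in a single page or the server ignored the
    		// chunking request; callers that cache by resourceVersion may care.
    		fmt.Println("result was not paginated")
    	}
    	return obj, nil
    }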
 
diff --git a/vendor/k8s.io/client-go/tools/remotecommand/doc.go b/vendor/k8s.io/client-go/tools/remotecommand/doc.go
new file mode 100644
index 00000000..ac06a9cd
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/remotecommand/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package remotecommand adds support for executing commands in containers,
+// with support for separate stdin, stdout, and stderr streams, as well as
+// TTY.
+package remotecommand // import "k8s.io/client-go/tools/remotecommand"
diff --git a/vendor/k8s.io/client-go/tools/remotecommand/errorstream.go b/vendor/k8s.io/client-go/tools/remotecommand/errorstream.go
new file mode 100644
index 00000000..360276b6
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/remotecommand/errorstream.go
@@ -0,0 +1,55 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package remotecommand
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+
+	"k8s.io/apimachinery/pkg/util/runtime"
+)
+
+// errorStreamDecoder interprets the data on the error channel and creates a go error object from it.
+type errorStreamDecoder interface {
+	decode(message []byte) error
+}
+
+// watchErrorStream watches the errorStream for remote command error data,
+// decodes it with the given errorStreamDecoder, sends the decoded error (or nil if the remote
+// command exited successfully) to the returned error channel, and closes it.
+// This function returns immediately.
+func watchErrorStream(errorStream io.Reader, d errorStreamDecoder) chan error {
+	errorChan := make(chan error)
+
+	go func() {
+		defer runtime.HandleCrash()
+
+		message, err := ioutil.ReadAll(errorStream)
+		switch {
+		case err != nil && err != io.EOF:
+			errorChan <- fmt.Errorf("error reading from error stream: %s", err)
+		case len(message) > 0:
+			errorChan <- d.decode(message)
+		default:
+			errorChan <- nil
+		}
+		close(errorChan)
+	}()
+
+	return errorChan
+}
diff --git a/vendor/k8s.io/client-go/tools/remotecommand/reader.go b/vendor/k8s.io/client-go/tools/remotecommand/reader.go
new file mode 100644
index 00000000..d1f1be34
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/remotecommand/reader.go
@@ -0,0 +1,41 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package remotecommand
+
+import (
+	"io"
+)
+
+// readerWrapper delegates to an io.Reader so that only the io.Reader interface is implemented,
+// to keep io.Copy from doing things we don't want when copying from the reader to the data stream.
+//
+// If the Stdin io.Reader provided to remotecommand implements a WriteTo function (like bytes.Buffer does[1]),
+// io.Copy calls that method[2] to attempt to write the entire buffer to the stream in one call.
+// That results in an oversized call to spdystream.Stream#Write [3],
+// which in turn produces a single data frame[4] that is too large.
+//
+// [1] https://golang.org/pkg/bytes/#Buffer.WriteTo
+// [2] https://golang.org/pkg/io/#Copy
+// [3] https://github.com/kubernetes/kubernetes/blob/90295640ef87db9daa0144c5617afe889e7992b2/vendor/github.com/docker/spdystream/stream.go#L66-L73
+// [4] https://github.com/kubernetes/kubernetes/blob/90295640ef87db9daa0144c5617afe889e7992b2/vendor/github.com/docker/spdystream/spdy/write.go#L302-L304
+type readerWrapper struct {
+	reader io.Reader
+}
+
+func (r readerWrapper) Read(p []byte) (int, error) {
+	return r.reader.Read(p)
+}
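
A hedged illustration of the behavior described above: io.Copy prefers a source's WriteTo method when one exists, so a raw bytes.Buffer is flushed in a single oversized Write, while wrapping the reader forces io.Copy back onto its bounded internal buffer. The countingWriter below is illustrative and only measures the Write calls it receives.

    package main

    import (
    	"bytes"
    	"fmt"
    	"io"
    	"strings"
    )

    // countingWriter records how many Write calls it receives and the largest one.
    type countingWriter struct {
    	calls, largest int
    }

    func (w *countingWriter) Write(p []byte) (int, error) {
    	w.calls++
    	if len(p) > w.largest {
    		w.largest = len(p)
    	}
    	return len(p), nil
    }

    // plainReader mirrors readerWrapper: it exposes only Read, hiding any WriteTo
    // method on the underlying reader.
    type plainReader struct{ r io.Reader }

    func (p plainReader) Read(b []byte) (int, error) { return p.r.Read(b) }

    func main() {
    	payload := strings.Repeat("x", 1<<20) // 1 MiB of "stdin" data

    	// Raw *bytes.Buffer: io.Copy uses WriteTo, so the destination sees a
    	// single 1 MiB Write (the oversized-frame problem described above).
    	direct := &countingWriter{}
    	io.Copy(direct, bytes.NewBufferString(payload))

    	// Wrapped: io.Copy falls back to chunked reads and bounded writes.
    	chunked := &countingWriter{}
    	io.Copy(chunked, plainReader{r: bytes.NewBufferString(payload)})

    	fmt.Printf("direct: %d writes, largest %d bytes\n", direct.calls, direct.largest)
    	fmt.Printf("wrapped: %d writes, largest %d bytes\n", chunked.calls, chunked.largest)
    }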
diff --git a/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go b/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go
new file mode 100644
index 00000000..892d8d10
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go
@@ -0,0 +1,142 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package remotecommand
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+
+	"k8s.io/klog"
+
+	"k8s.io/apimachinery/pkg/util/httpstream"
+	"k8s.io/apimachinery/pkg/util/remotecommand"
+	restclient "k8s.io/client-go/rest"
+	spdy "k8s.io/client-go/transport/spdy"
+)
+
+// StreamOptions holds information pertaining to the current streaming session:
+// input/output streams, if the client is requesting a TTY, and a terminal size queue to
+// support terminal resizing.
+type StreamOptions struct {
+	Stdin             io.Reader
+	Stdout            io.Writer
+	Stderr            io.Writer
+	Tty               bool
+	TerminalSizeQueue TerminalSizeQueue
+}
+
+// Executor is an interface for transporting shell-style streams.
+type Executor interface {
+	// Stream initiates the transport of the standard shell streams. It will transport any
+	// non-nil stream to a remote system, and return an error if a problem occurs. If tty
+	// is set, the stderr stream is not used (raw TTY manages stdout and stderr over the
+	// stdout stream).
+	Stream(options StreamOptions) error
+}
+
+type streamCreator interface {
+	CreateStream(headers http.Header) (httpstream.Stream, error)
+}
+
+type streamProtocolHandler interface {
+	stream(conn streamCreator) error
+}
+
+// streamExecutor handles transporting standard shell streams over an httpstream connection.
+type streamExecutor struct {
+	upgrader  spdy.Upgrader
+	transport http.RoundTripper
+
+	method    string
+	url       *url.URL
+	protocols []string
+}
+
+// NewSPDYExecutor connects to the provided server and upgrades the connection to
+// multiplexed bidirectional streams.
+func NewSPDYExecutor(config *restclient.Config, method string, url *url.URL) (Executor, error) {
+	wrapper, upgradeRoundTripper, err := spdy.RoundTripperFor(config)
+	if err != nil {
+		return nil, err
+	}
+	return NewSPDYExecutorForTransports(wrapper, upgradeRoundTripper, method, url)
+}
+
+// NewSPDYExecutorForTransports connects to the provided server using the given transport,
+// upgrades the response using the given upgrader to multiplexed bidirectional streams.
+func NewSPDYExecutorForTransports(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL) (Executor, error) {
+	return NewSPDYExecutorForProtocols(
+		transport, upgrader, method, url,
+		remotecommand.StreamProtocolV4Name,
+		remotecommand.StreamProtocolV3Name,
+		remotecommand.StreamProtocolV2Name,
+		remotecommand.StreamProtocolV1Name,
+	)
+}
+
+// NewSPDYExecutorForProtocols connects to the provided server and upgrades the connection to
+// multiplexed bidirectional streams using only the provided protocols. Exposed for testing, most
+// callers should use NewSPDYExecutor or NewSPDYExecutorForTransports.
+func NewSPDYExecutorForProtocols(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL, protocols ...string) (Executor, error) {
+	return &streamExecutor{
+		upgrader:  upgrader,
+		transport: transport,
+		method:    method,
+		url:       url,
+		protocols: protocols,
+	}, nil
+}
+
+// Stream opens a protocol streamer to the server and streams until a client closes
+// the connection or the server disconnects.
+func (e *streamExecutor) Stream(options StreamOptions) error {
+	req, err := http.NewRequest(e.method, e.url.String(), nil)
+	if err != nil {
+		return fmt.Errorf("error creating request: %v", err)
+	}
+
+	conn, protocol, err := spdy.Negotiate(
+		e.upgrader,
+		&http.Client{Transport: e.transport},
+		req,
+		e.protocols...,
+	)
+	if err != nil {
+		return err
+	}
+	defer conn.Close()
+
+	var streamer streamProtocolHandler
+
+	switch protocol {
+	case remotecommand.StreamProtocolV4Name:
+		streamer = newStreamProtocolV4(options)
+	case remotecommand.StreamProtocolV3Name:
+		streamer = newStreamProtocolV3(options)
+	case remotecommand.StreamProtocolV2Name:
+		streamer = newStreamProtocolV2(options)
+	case "":
+		klog.V(4).Infof("The server did not negotiate a streaming protocol version. Falling back to %s", remotecommand.StreamProtocolV1Name)
+		fallthrough
+	case remotecommand.StreamProtocolV1Name:
+		streamer = newStreamProtocolV1(options)
+	}
+
+	return streamer.stream(conn)
+}
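
A hedged usage sketch of the Executor defined in this file; how the rest config and the exec URL are built is an assumption about the caller and is not part of this patch. With Tty set, stderr would be multiplexed onto stdout, as the Executor documentation above notes.

    package example

    import (
    	"net/url"
    	"os"

    	restclient "k8s.io/client-go/rest"
    	"k8s.io/client-go/tools/remotecommand"
    )

    // execInPod assumes config comes from clientcmd and execURL is a fully formed
    // .../pods/<name>/exec URL with command, container and stream query parameters.
    func execInPod(config *restclient.Config, execURL *url.URL) error {
    	exec, err := remotecommand.NewSPDYExecutor(config, "POST", execURL)
    	if err != nil {
    		return err
    	}
    	// Stream blocks until the remote process exits or the connection drops.
    	return exec.Stream(remotecommand.StreamOptions{
    		Stdin:  os.Stdin,
    		Stdout: os.Stdout,
    		Stderr: os.Stderr,
    		Tty:    false,
    	})
    }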
diff --git a/vendor/k8s.io/client-go/tools/remotecommand/resize.go b/vendor/k8s.io/client-go/tools/remotecommand/resize.go
new file mode 100644
index 00000000..c838f21b
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/remotecommand/resize.go
@@ -0,0 +1,33 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package remotecommand
+
+// TerminalSize and TerminalSizeQueue was a part of k8s.io/kubernetes/pkg/util/term
+// and were moved in order to decouple client from other term dependencies
+
+// TerminalSize represents the width and height of a terminal.
+type TerminalSize struct {
+	Width  uint16
+	Height uint16
+}
+
+// TerminalSizeQueue is capable of returning terminal resize events as they occur.
+type TerminalSizeQueue interface {
+	// Next returns the new terminal size after the terminal has been resized. It returns nil when
+	// monitoring has been stopped.
+	Next() *TerminalSize
+}
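
A hedged sketch of one way to satisfy TerminalSizeQueue, backed by a channel; the buffering and stop semantics are illustrative. Such a queue would be passed as StreamOptions.TerminalSizeQueue, typically together with Tty set to true.

    package example

    import "k8s.io/client-go/tools/remotecommand"

    // chanSizeQueue adapts a channel of terminal sizes to TerminalSizeQueue.
    // Closing the channel makes Next return nil, which stops resize monitoring.
    type chanSizeQueue struct {
    	sizes chan remotecommand.TerminalSize
    }

    func newChanSizeQueue() *chanSizeQueue {
    	return &chanSizeQueue{sizes: make(chan remotecommand.TerminalSize, 1)}
    }

    func (q *chanSizeQueue) Next() *remotecommand.TerminalSize {
    	size, ok := <-q.sizes
    	if !ok {
    		return nil
    	}
    	return &size
    }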
diff --git a/vendor/k8s.io/client-go/tools/remotecommand/v1.go b/vendor/k8s.io/client-go/tools/remotecommand/v1.go
new file mode 100644
index 00000000..4120f1f5
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/remotecommand/v1.go
@@ -0,0 +1,160 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package remotecommand
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+
+	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/util/httpstream"
+	"k8s.io/klog"
+)
+
+// streamProtocolV1 implements the first version of the streaming exec & attach
+// protocol. This version has some bugs, such as not being able to detect when
+// non-interactive stdin data has ended. See http://issues.k8s.io/13394 and
+// http://issues.k8s.io/13395 for more details.
+type streamProtocolV1 struct {
+	StreamOptions
+
+	errorStream  httpstream.Stream
+	remoteStdin  httpstream.Stream
+	remoteStdout httpstream.Stream
+	remoteStderr httpstream.Stream
+}
+
+var _ streamProtocolHandler = &streamProtocolV1{}
+
+func newStreamProtocolV1(options StreamOptions) streamProtocolHandler {
+	return &streamProtocolV1{
+		StreamOptions: options,
+	}
+}
+
+func (p *streamProtocolV1) stream(conn streamCreator) error {
+	doneChan := make(chan struct{}, 2)
+	errorChan := make(chan error)
+
+	cp := func(s string, dst io.Writer, src io.Reader) {
+		klog.V(6).Infof("Copying %s", s)
+		defer klog.V(6).Infof("Done copying %s", s)
+		if _, err := io.Copy(dst, src); err != nil && err != io.EOF {
+			klog.Errorf("Error copying %s: %v", s, err)
+		}
+		if s == v1.StreamTypeStdout || s == v1.StreamTypeStderr {
+			doneChan <- struct{}{}
+		}
+	}
+
+	// set up all the streams first
+	var err error
+	headers := http.Header{}
+	headers.Set(v1.StreamType, v1.StreamTypeError)
+	p.errorStream, err = conn.CreateStream(headers)
+	if err != nil {
+		return err
+	}
+	defer p.errorStream.Reset()
+
+	// Create all the streams first, then start the copy goroutines. The server doesn't start its copy
+	// goroutines until it's received all of the streams. If the client creates the stdin stream and
+	// immediately begins copying stdin data to the server, it's possible to overwhelm and wedge the
+	// spdy frame handler in the server so that it is full of unprocessed frames. The frames aren't
+	// getting processed because the server hasn't started its copying, and it won't do that until it
+	// gets all the streams. By creating all the streams first, we ensure that the server is ready to
+	// process data before the client starts sending any. See https://issues.k8s.io/16373 for more info.
+	if p.Stdin != nil {
+		headers.Set(v1.StreamType, v1.StreamTypeStdin)
+		p.remoteStdin, err = conn.CreateStream(headers)
+		if err != nil {
+			return err
+		}
+		defer p.remoteStdin.Reset()
+	}
+
+	if p.Stdout != nil {
+		headers.Set(v1.StreamType, v1.StreamTypeStdout)
+		p.remoteStdout, err = conn.CreateStream(headers)
+		if err != nil {
+			return err
+		}
+		defer p.remoteStdout.Reset()
+	}
+
+	if p.Stderr != nil && !p.Tty {
+		headers.Set(v1.StreamType, v1.StreamTypeStderr)
+		p.remoteStderr, err = conn.CreateStream(headers)
+		if err != nil {
+			return err
+		}
+		defer p.remoteStderr.Reset()
+	}
+
+	// now that all the streams have been created, proceed with reading & copying
+
+	// always read from errorStream
+	go func() {
+		message, err := ioutil.ReadAll(p.errorStream)
+		if err != nil && err != io.EOF {
+			errorChan <- fmt.Errorf("Error reading from error stream: %s", err)
+			return
+		}
+		if len(message) > 0 {
+			errorChan <- fmt.Errorf("Error executing remote command: %s", message)
+			return
+		}
+	}()
+
+	if p.Stdin != nil {
+		// TODO this goroutine will never exit cleanly (the io.Copy never unblocks)
+		// because stdin is not closed until the process exits. If we try to call
+		// stdin.Close(), it returns no error but doesn't unblock the copy. It will
+		// exit when the process exits, instead.
+		go cp(v1.StreamTypeStdin, p.remoteStdin, readerWrapper{p.Stdin})
+	}
+
+	waitCount := 0
+	completedStreams := 0
+
+	if p.Stdout != nil {
+		waitCount++
+		go cp(v1.StreamTypeStdout, p.Stdout, p.remoteStdout)
+	}
+
+	if p.Stderr != nil && !p.Tty {
+		waitCount++
+		go cp(v1.StreamTypeStderr, p.Stderr, p.remoteStderr)
+	}
+
+Loop:
+	for {
+		select {
+		case <-doneChan:
+			completedStreams++
+			if completedStreams == waitCount {
+				break Loop
+			}
+		case err := <-errorChan:
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/k8s.io/client-go/tools/remotecommand/v2.go b/vendor/k8s.io/client-go/tools/remotecommand/v2.go
new file mode 100644
index 00000000..4b000150
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/remotecommand/v2.go
@@ -0,0 +1,195 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package remotecommand
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"sync"
+
+	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/util/runtime"
+)
+
+// streamProtocolV2 implements version 2 of the streaming protocol for attach
+// and exec. The original streaming protocol was metav1. As a result, this
+// version is referred to as version 2, even though it is the first actual
+// numbered version.
+type streamProtocolV2 struct {
+	StreamOptions
+
+	errorStream  io.Reader
+	remoteStdin  io.ReadWriteCloser
+	remoteStdout io.Reader
+	remoteStderr io.Reader
+}
+
+var _ streamProtocolHandler = &streamProtocolV2{}
+
+func newStreamProtocolV2(options StreamOptions) streamProtocolHandler {
+	return &streamProtocolV2{
+		StreamOptions: options,
+	}
+}
+
+func (p *streamProtocolV2) createStreams(conn streamCreator) error {
+	var err error
+	headers := http.Header{}
+
+	// set up error stream
+	headers.Set(v1.StreamType, v1.StreamTypeError)
+	p.errorStream, err = conn.CreateStream(headers)
+	if err != nil {
+		return err
+	}
+
+	// set up stdin stream
+	if p.Stdin != nil {
+		headers.Set(v1.StreamType, v1.StreamTypeStdin)
+		p.remoteStdin, err = conn.CreateStream(headers)
+		if err != nil {
+			return err
+		}
+	}
+
+	// set up stdout stream
+	if p.Stdout != nil {
+		headers.Set(v1.StreamType, v1.StreamTypeStdout)
+		p.remoteStdout, err = conn.CreateStream(headers)
+		if err != nil {
+			return err
+		}
+	}
+
+	// set up stderr stream
+	if p.Stderr != nil && !p.Tty {
+		headers.Set(v1.StreamType, v1.StreamTypeStderr)
+		p.remoteStderr, err = conn.CreateStream(headers)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (p *streamProtocolV2) copyStdin() {
+	if p.Stdin != nil {
+		var once sync.Once
+
+		// copy from client's stdin to container's stdin
+		go func() {
+			defer runtime.HandleCrash()
+
+			// if p.stdin is noninteractive, e.g. `echo abc | kubectl exec -i <pod> -- cat`, make sure
+			// we close remoteStdin as soon as the copy from p.stdin to remoteStdin finishes. Otherwise
+			// the executed command will remain running.
+			defer once.Do(func() { p.remoteStdin.Close() })
+
+			if _, err := io.Copy(p.remoteStdin, readerWrapper{p.Stdin}); err != nil {
+				runtime.HandleError(err)
+			}
+		}()
+
+		// read from remoteStdin until the stream is closed. this is essential to
+		// be able to exit interactive sessions cleanly and not leak goroutines or
+		// hang the client's terminal.
+		//
+		// TODO we aren't using go-dockerclient any more; revisit this to determine if it's still
+		// required by engine-api.
+		//
+		// go-dockerclient's current hijack implementation
+		// (https://github.com/fsouza/go-dockerclient/blob/89f3d56d93788dfe85f864a44f85d9738fca0670/client.go#L564)
+		// waits for all three streams (stdin/stdout/stderr) to finish copying
+		// before returning. When hijack finishes copying stdout/stderr, it calls
+		// Close() on its side of remoteStdin, which allows this copy to complete.
+		// When that happens, we must Close() on our side of remoteStdin, to
+		// allow the copy in hijack to complete, and hijack to return.
+		go func() {
+			defer runtime.HandleCrash()
+			defer once.Do(func() { p.remoteStdin.Close() })
+
+			// this "copy" doesn't actually read anything - it's just here to wait for
+			// the server to close remoteStdin.
+			if _, err := io.Copy(ioutil.Discard, p.remoteStdin); err != nil {
+				runtime.HandleError(err)
+			}
+		}()
+	}
+}
+
+func (p *streamProtocolV2) copyStdout(wg *sync.WaitGroup) {
+	if p.Stdout == nil {
+		return
+	}
+
+	wg.Add(1)
+	go func() {
+		defer runtime.HandleCrash()
+		defer wg.Done()
+
+		if _, err := io.Copy(p.Stdout, p.remoteStdout); err != nil {
+			runtime.HandleError(err)
+		}
+	}()
+}
+
+func (p *streamProtocolV2) copyStderr(wg *sync.WaitGroup) {
+	if p.Stderr == nil || p.Tty {
+		return
+	}
+
+	wg.Add(1)
+	go func() {
+		defer runtime.HandleCrash()
+		defer wg.Done()
+
+		if _, err := io.Copy(p.Stderr, p.remoteStderr); err != nil {
+			runtime.HandleError(err)
+		}
+	}()
+}
+
+func (p *streamProtocolV2) stream(conn streamCreator) error {
+	if err := p.createStreams(conn); err != nil {
+		return err
+	}
+
+	// now that all the streams have been created, proceed with reading & copying
+
+	errorChan := watchErrorStream(p.errorStream, &errorDecoderV2{})
+
+	p.copyStdin()
+
+	var wg sync.WaitGroup
+	p.copyStdout(&wg)
+	p.copyStderr(&wg)
+
+	// we're waiting for stdout/stderr to finish copying
+	wg.Wait()
+
+	// waits for errorStream to finish reading with an error or nil
+	return <-errorChan
+}
+
+// errorDecoderV2 interprets the error channel data as plain text.
+type errorDecoderV2 struct{}
+
+func (d *errorDecoderV2) decode(message []byte) error {
+	return fmt.Errorf("error executing remote command: %s", message)
+}
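
The stdin handling above closes remoteStdin from two competing goroutines and relies on sync.Once so that exactly one Close happens, whichever side finishes first. A stripped-down sketch of that pattern; the closer type is illustrative.

    package main

    import (
    	"fmt"
    	"sync"
    )

    type closer struct{ name string }

    func (c *closer) Close() error {
    	fmt.Println("closed", c.name)
    	return nil
    }

    func main() {
    	remoteStdin := &closer{name: "remoteStdin"}
    	var once sync.Once
    	var wg sync.WaitGroup

    	// Two goroutines race to close the stream; Once guarantees a single Close
    	// no matter which copy loop finishes first.
    	for i := 0; i < 2; i++ {
    		wg.Add(1)
    		go func() {
    			defer wg.Done()
    			once.Do(func() { remoteStdin.Close() })
    		}()
    	}
    	wg.Wait()
    }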
diff --git a/vendor/k8s.io/client-go/tools/remotecommand/v3.go b/vendor/k8s.io/client-go/tools/remotecommand/v3.go
new file mode 100644
index 00000000..846dd24a
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/remotecommand/v3.go
@@ -0,0 +1,111 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package remotecommand
+
+import (
+	"encoding/json"
+	"io"
+	"net/http"
+	"sync"
+
+	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/util/runtime"
+)
+
+// streamProtocolV3 implements version 3 of the streaming protocol for attach
+// and exec. This version adds support for resizing the container's terminal.
+type streamProtocolV3 struct {
+	*streamProtocolV2
+
+	resizeStream io.Writer
+}
+
+var _ streamProtocolHandler = &streamProtocolV3{}
+
+func newStreamProtocolV3(options StreamOptions) streamProtocolHandler {
+	return &streamProtocolV3{
+		streamProtocolV2: newStreamProtocolV2(options).(*streamProtocolV2),
+	}
+}
+
+func (p *streamProtocolV3) createStreams(conn streamCreator) error {
+	// set up the streams from v2
+	if err := p.streamProtocolV2.createStreams(conn); err != nil {
+		return err
+	}
+
+	// set up resize stream
+	if p.Tty {
+		headers := http.Header{}
+		headers.Set(v1.StreamType, v1.StreamTypeResize)
+		var err error
+		p.resizeStream, err = conn.CreateStream(headers)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (p *streamProtocolV3) handleResizes() {
+	if p.resizeStream == nil || p.TerminalSizeQueue == nil {
+		return
+	}
+	go func() {
+		defer runtime.HandleCrash()
+
+		encoder := json.NewEncoder(p.resizeStream)
+		for {
+			size := p.TerminalSizeQueue.Next()
+			if size == nil {
+				return
+			}
+			if err := encoder.Encode(&size); err != nil {
+				runtime.HandleError(err)
+			}
+		}
+	}()
+}
+
+func (p *streamProtocolV3) stream(conn streamCreator) error {
+	if err := p.createStreams(conn); err != nil {
+		return err
+	}
+
+	// now that all the streams have been created, proceed with reading & copying
+
+	errorChan := watchErrorStream(p.errorStream, &errorDecoderV3{})
+
+	p.handleResizes()
+
+	p.copyStdin()
+
+	var wg sync.WaitGroup
+	p.copyStdout(&wg)
+	p.copyStderr(&wg)
+
+	// we're waiting for stdout/stderr to finish copying
+	wg.Wait()
+
+	// waits for errorStream to finish reading with an error or nil
+	return <-errorChan
+}
+
+type errorDecoderV3 struct {
+	errorDecoderV2
+}
diff --git a/vendor/k8s.io/client-go/tools/remotecommand/v4.go b/vendor/k8s.io/client-go/tools/remotecommand/v4.go
new file mode 100644
index 00000000..69ca934a
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/remotecommand/v4.go
@@ -0,0 +1,119 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package remotecommand
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strconv"
+	"sync"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/remotecommand"
+	"k8s.io/client-go/util/exec"
+)
+
+// streamProtocolV4 implements version 4 of the streaming protocol for attach
+// and exec. This version adds support for exit codes on the error stream through
+// the use of metav1.Status instead of plain text messages.
+type streamProtocolV4 struct {
+	*streamProtocolV3
+}
+
+var _ streamProtocolHandler = &streamProtocolV4{}
+
+func newStreamProtocolV4(options StreamOptions) streamProtocolHandler {
+	return &streamProtocolV4{
+		streamProtocolV3: newStreamProtocolV3(options).(*streamProtocolV3),
+	}
+}
+
+func (p *streamProtocolV4) createStreams(conn streamCreator) error {
+	return p.streamProtocolV3.createStreams(conn)
+}
+
+func (p *streamProtocolV4) handleResizes() {
+	p.streamProtocolV3.handleResizes()
+}
+
+func (p *streamProtocolV4) stream(conn streamCreator) error {
+	if err := p.createStreams(conn); err != nil {
+		return err
+	}
+
+	// now that all the streams have been created, proceed with reading & copying
+
+	errorChan := watchErrorStream(p.errorStream, &errorDecoderV4{})
+
+	p.handleResizes()
+
+	p.copyStdin()
+
+	var wg sync.WaitGroup
+	p.copyStdout(&wg)
+	p.copyStderr(&wg)
+
+	// we're waiting for stdout/stderr to finish copying
+	wg.Wait()
+
+	// waits for errorStream to finish reading with an error or nil
+	return <-errorChan
+}
+
+// errorDecoderV4 interprets the json-marshaled metav1.Status on the error channel
+// and creates an exec.ExitError from it.
+type errorDecoderV4 struct{}
+
+func (d *errorDecoderV4) decode(message []byte) error {
+	status := metav1.Status{}
+	err := json.Unmarshal(message, &status)
+	if err != nil {
+		return fmt.Errorf("error stream protocol error: %v in %q", err, string(message))
+	}
+	switch status.Status {
+	case metav1.StatusSuccess:
+		return nil
+	case metav1.StatusFailure:
+		if status.Reason == remotecommand.NonZeroExitCodeReason {
+			if status.Details == nil {
+				return errors.New("error stream protocol error: details must be set")
+			}
+			for i := range status.Details.Causes {
+				c := &status.Details.Causes[i]
+				if c.Type != remotecommand.ExitCodeCauseType {
+					continue
+				}
+
+				rc, err := strconv.ParseUint(c.Message, 10, 8)
+				if err != nil {
+					return fmt.Errorf("error stream protocol error: invalid exit code value %q", c.Message)
+				}
+				return exec.CodeExitError{
+					Err:  fmt.Errorf("command terminated with exit code %d", rc),
+					Code: int(rc),
+				}
+			}
+
+			return fmt.Errorf("error stream protocol error: no %s cause given", remotecommand.ExitCodeCauseType)
+		}
+	default:
+		return errors.New("error stream protocol error: unknown error")
+	}
+
+	return fmt.Errorf(status.Message)
+}
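
Because a non-zero exit is decoded into exec.CodeExitError, callers can recover the remote exit code from the error returned by Stream. A hedged sketch; the errors.As usage assumes Go 1.13 or later.

    package main

    import (
    	"errors"
    	"fmt"

    	"k8s.io/client-go/util/exec"
    )

    // exitCodeOf inspects an error returned by Executor.Stream and reports the
    // remote command's exit code when one was carried on the v4 error channel.
    func exitCodeOf(streamErr error) (int, bool) {
    	var codeErr exec.CodeExitError
    	if errors.As(streamErr, &codeErr) {
    		return codeErr.Code, true
    	}
    	return 0, false
    }

    func main() {
    	err := exec.CodeExitError{Err: fmt.Errorf("command terminated with exit code 2"), Code: 2}
    	if code, ok := exitCodeOf(err); ok {
    		fmt.Println("remote exit code:", code)
    	}
    }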
diff --git a/vendor/k8s.io/client-go/transport/cache.go b/vendor/k8s.io/client-go/transport/cache.go
index 980d36ae..36d6500f 100644
--- a/vendor/k8s.io/client-go/transport/cache.go
+++ b/vendor/k8s.io/client-go/transport/cache.go
@@ -25,6 +25,7 @@ import (
 	"time"
 
 	utilnet "k8s.io/apimachinery/pkg/util/net"
+	"k8s.io/apimachinery/pkg/util/wait"
 )
 
 // TlsTransportCache caches TLS http.RoundTrippers for different configurations. The
@@ -44,6 +45,8 @@ type tlsCacheKey struct {
 	caData             string
 	certData           string
 	keyData            string
+	certFile           string
+	keyFile            string
 	getCert            string
 	serverName         string
 	nextProtos         string
@@ -91,6 +94,16 @@ func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) {
 			KeepAlive: 30 * time.Second,
 		}).DialContext
 	}
+
+	// If we are reloading files, we need to handle certificate rotation properly
+	// TODO(jackkleeman): We can also add rotation here when config.HasCertCallback() is true
+	if config.TLS.ReloadTLSFiles {
+		dynamicCertDialer := certRotatingDialer(tlsConfig.GetClientCertificate, dial)
+		tlsConfig.GetClientCertificate = dynamicCertDialer.GetClientCertificate
+		dial = dynamicCertDialer.connDialer.DialContext
+		go dynamicCertDialer.Run(wait.NeverStop)
+	}
+
 	// Cache a single transport for these options
 	c.transports[key] = utilnet.SetTransportDefaults(&http.Transport{
 		Proxy:               http.ProxyFromEnvironment,
@@ -109,15 +122,23 @@ func tlsConfigKey(c *Config) (tlsCacheKey, error) {
 	if err := loadTLSFiles(c); err != nil {
 		return tlsCacheKey{}, err
 	}
-	return tlsCacheKey{
+	k := tlsCacheKey{
 		insecure:           c.TLS.Insecure,
 		caData:             string(c.TLS.CAData),
-		certData:           string(c.TLS.CertData),
-		keyData:            string(c.TLS.KeyData),
 		getCert:            fmt.Sprintf("%p", c.TLS.GetCert),
 		serverName:         c.TLS.ServerName,
 		nextProtos:         strings.Join(c.TLS.NextProtos, ","),
 		dial:               fmt.Sprintf("%p", c.Dial),
 		disableCompression: c.DisableCompression,
-	}, nil
+	}
+
+	if c.TLS.ReloadTLSFiles {
+		k.certFile = c.TLS.CertFile
+		k.keyFile = c.TLS.KeyFile
+	} else {
+		k.certData = string(c.TLS.CertData)
+		k.keyData = string(c.TLS.KeyData)
+	}
+
+	return k, nil
 }
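
The net effect of the key change is that reloadable file-based configs are cached by path rather than by the bytes read at construction time, so rotating the files on disk keeps reusing the same cached transport. A hedged sketch of that intent; the cacheKey type below is illustrative, not the vendored tlsCacheKey.

    package main

    import "fmt"

    // cacheKey mirrors the idea behind tlsCacheKey: when certs are reloadable
    // files, key the transport cache on the paths; otherwise on the literal data.
    type cacheKey struct {
    	certFile, keyFile string // used when reloading from files
    	certData, keyData string // used for inline data
    }

    func keyFor(reloadFiles bool, certFile, keyFile string, certData, keyData []byte) cacheKey {
    	if reloadFiles {
    		// Rotating the file contents keeps the key, and the cached transport, stable.
    		return cacheKey{certFile: certFile, keyFile: keyFile}
    	}
    	return cacheKey{certData: string(certData), keyData: string(keyData)}
    }

    func main() {
    	before := keyFor(true, "/var/run/tls/client.crt", "/var/run/tls/client.key", nil, nil)
    	after := keyFor(true, "/var/run/tls/client.crt", "/var/run/tls/client.key", nil, nil)
    	fmt.Println("same transport reused:", before == after)
    }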
diff --git a/vendor/k8s.io/client-go/transport/cert_rotation.go b/vendor/k8s.io/client-go/transport/cert_rotation.go
new file mode 100644
index 00000000..918e77f9
--- /dev/null
+++ b/vendor/k8s.io/client-go/transport/cert_rotation.go
@@ -0,0 +1,176 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package transport
+
+import (
+	"bytes"
+	"crypto/tls"
+	"fmt"
+	"reflect"
+	"sync"
+	"time"
+
+	utilnet "k8s.io/apimachinery/pkg/util/net"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/util/connrotation"
+	"k8s.io/client-go/util/workqueue"
+	"k8s.io/klog"
+)
+
+const workItemKey = "key"
+
+// CertCallbackRefreshDuration is exposed so that integration tests can crank up the reload speed.
+var CertCallbackRefreshDuration = 5 * time.Minute
+
+type reloadFunc func(*tls.CertificateRequestInfo) (*tls.Certificate, error)
+
+type dynamicClientCert struct {
+	clientCert *tls.Certificate
+	certMtx    sync.RWMutex
+
+	reload     reloadFunc
+	connDialer *connrotation.Dialer
+
+	// queue only ever has one item, but it has nice error handling backoff/retry semantics
+	queue workqueue.RateLimitingInterface
+}
+
+func certRotatingDialer(reload reloadFunc, dial utilnet.DialFunc) *dynamicClientCert {
+	d := &dynamicClientCert{
+		reload:     reload,
+		connDialer: connrotation.NewDialer(connrotation.DialFunc(dial)),
+		queue:      workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "DynamicClientCertificate"),
+	}
+
+	return d
+}
+
+// loadClientCert calls the callback and rotates connections if needed
+func (c *dynamicClientCert) loadClientCert() (*tls.Certificate, error) {
+	cert, err := c.reload(nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// check to see if we have a change. If the values are the same, do nothing.
+	c.certMtx.RLock()
+	haveCert := c.clientCert != nil
+	if certsEqual(c.clientCert, cert) {
+		c.certMtx.RUnlock()
+		return c.clientCert, nil
+	}
+	c.certMtx.RUnlock()
+
+	c.certMtx.Lock()
+	c.clientCert = cert
+	c.certMtx.Unlock()
+
+	// The first certificate requested is not a rotation that is worth closing connections for
+	if !haveCert {
+		return cert, nil
+	}
+
+	klog.V(1).Infof("certificate rotation detected, shutting down client connections to start using new credentials")
+	c.connDialer.CloseAll()
+
+	return cert, nil
+}
+
+// certsEqual compares tls Certificates, ignoring the Leaf which may get filled in dynamically
+func certsEqual(left, right *tls.Certificate) bool {
+	if left == nil || right == nil {
+		return left == right
+	}
+
+	if !byteMatrixEqual(left.Certificate, right.Certificate) {
+		return false
+	}
+
+	if !reflect.DeepEqual(left.PrivateKey, right.PrivateKey) {
+		return false
+	}
+
+	if !byteMatrixEqual(left.SignedCertificateTimestamps, right.SignedCertificateTimestamps) {
+		return false
+	}
+
+	if !bytes.Equal(left.OCSPStaple, right.OCSPStaple) {
+		return false
+	}
+
+	return true
+}
+
+func byteMatrixEqual(left, right [][]byte) bool {
+	if len(left) != len(right) {
+		return false
+	}
+
+	for i := range left {
+		if !bytes.Equal(left[i], right[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+// Run starts the controller and blocks until stopCh is closed.
+func (c *dynamicClientCert) Run(stopCh <-chan struct{}) {
+	defer utilruntime.HandleCrash()
+	defer c.queue.ShutDown()
+
+	klog.V(3).Infof("Starting client certificate rotation controller")
+	defer klog.V(3).Infof("Shutting down client certificate rotation controller")
+
+	go wait.Until(c.runWorker, time.Second, stopCh)
+
+	go wait.PollImmediateUntil(CertCallbackRefreshDuration, func() (bool, error) {
+		c.queue.Add(workItemKey)
+		return false, nil
+	}, stopCh)
+
+	<-stopCh
+}
+
+func (c *dynamicClientCert) runWorker() {
+	for c.processNextWorkItem() {
+	}
+}
+
+func (c *dynamicClientCert) processNextWorkItem() bool {
+	dsKey, quit := c.queue.Get()
+	if quit {
+		return false
+	}
+	defer c.queue.Done(dsKey)
+
+	_, err := c.loadClientCert()
+	if err == nil {
+		c.queue.Forget(dsKey)
+		return true
+	}
+
+	utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err))
+	c.queue.AddRateLimited(dsKey)
+
+	return true
+}
+
+func (c *dynamicClientCert) GetClientCertificate(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
+	return c.loadClientCert()
+}
diff --git a/vendor/k8s.io/client-go/transport/config.go b/vendor/k8s.io/client-go/transport/config.go
index 9e18d11d..c20a4a8f 100644
--- a/vendor/k8s.io/client-go/transport/config.go
+++ b/vendor/k8s.io/client-go/transport/config.go
@@ -115,9 +115,10 @@ func (c *Config) Wrap(fn WrapperFunc) {
 
 // TLSConfig holds the information needed to set up a TLS transport.
 type TLSConfig struct {
-	CAFile   string // Path of the PEM-encoded server trusted root certificates.
-	CertFile string // Path of the PEM-encoded client certificate.
-	KeyFile  string // Path of the PEM-encoded client key.
+	CAFile         string // Path of the PEM-encoded server trusted root certificates.
+	CertFile       string // Path of the PEM-encoded client certificate.
+	KeyFile        string // Path of the PEM-encoded client key.
+	ReloadTLSFiles bool   // Set to indicate that the original config provided files, and that they should be reloaded
 
 	Insecure   bool   // Server should be accessed without verifying the certificate. For testing only.
 	ServerName string // Override for the server name passed to the server for SNI and used to verify certificates.
diff --git a/vendor/k8s.io/client-go/transport/spdy/spdy.go b/vendor/k8s.io/client-go/transport/spdy/spdy.go
new file mode 100644
index 00000000..53cc7ee1
--- /dev/null
+++ b/vendor/k8s.io/client-go/transport/spdy/spdy.go
@@ -0,0 +1,94 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package spdy
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+
+	"k8s.io/apimachinery/pkg/util/httpstream"
+	"k8s.io/apimachinery/pkg/util/httpstream/spdy"
+	restclient "k8s.io/client-go/rest"
+)
+
+// Upgrader validates a response from the server after a SPDY upgrade.
+type Upgrader interface {
+	// NewConnection validates the response and creates a new Connection.
+	NewConnection(resp *http.Response) (httpstream.Connection, error)
+}
+
+// RoundTripperFor returns a round tripper and upgrader to use with SPDY.
+func RoundTripperFor(config *restclient.Config) (http.RoundTripper, Upgrader, error) {
+	tlsConfig, err := restclient.TLSConfigFor(config)
+	if err != nil {
+		return nil, nil, err
+	}
+	upgradeRoundTripper := spdy.NewRoundTripper(tlsConfig, true, false)
+	wrapper, err := restclient.HTTPWrappersForConfig(config, upgradeRoundTripper)
+	if err != nil {
+		return nil, nil, err
+	}
+	return wrapper, upgradeRoundTripper, nil
+}
+
+// dialer implements the httpstream.Dialer interface.
+type dialer struct {
+	client   *http.Client
+	upgrader Upgrader
+	method   string
+	url      *url.URL
+}
+
+var _ httpstream.Dialer = &dialer{}
+
+// NewDialer will create a dialer that connects to the provided URL and upgrades the connection to SPDY.
+func NewDialer(upgrader Upgrader, client *http.Client, method string, url *url.URL) httpstream.Dialer {
+	return &dialer{
+		client:   client,
+		upgrader: upgrader,
+		method:   method,
+		url:      url,
+	}
+}
+
+func (d *dialer) Dial(protocols ...string) (httpstream.Connection, string, error) {
+	req, err := http.NewRequest(d.method, d.url.String(), nil)
+	if err != nil {
+		return nil, "", fmt.Errorf("error creating request: %v", err)
+	}
+	return Negotiate(d.upgrader, d.client, req, protocols...)
+}
+
+// Negotiate opens a connection to a remote server and attempts to negotiate
+// a SPDY connection. Upon success, it returns the connection and the protocol selected by
+// the server. The client transport must use the upgradeRoundTripper - see RoundTripperFor.
+func Negotiate(upgrader Upgrader, client *http.Client, req *http.Request, protocols ...string) (httpstream.Connection, string, error) {
+	for i := range protocols {
+		req.Header.Add(httpstream.HeaderProtocolVersion, protocols[i])
+	}
+	resp, err := client.Do(req)
+	if err != nil {
+		return nil, "", fmt.Errorf("error sending request: %v", err)
+	}
+	defer resp.Body.Close()
+	conn, err := upgrader.NewConnection(resp)
+	if err != nil {
+		return nil, "", err
+	}
+	return conn, resp.Header.Get(httpstream.HeaderProtocolVersion), nil
+}
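
Illustrative usage sketch (not part of this patch): how RoundTripperFor, NewDialer, and Dial are typically combined, assuming a populated rest.Config and a target URL; the stream protocol name below is an example only.

    package example

    import (
        "net/http"
        "net/url"

        "k8s.io/apimachinery/pkg/util/httpstream"
        restclient "k8s.io/client-go/rest"
        "k8s.io/client-go/transport/spdy"
    )

    // dialSPDY upgrades a request against target to a SPDY stream connection.
    func dialSPDY(config *restclient.Config, target *url.URL) (httpstream.Connection, string, error) {
        rt, upgrader, err := spdy.RoundTripperFor(config)
        if err != nil {
            return nil, "", err
        }
        client := &http.Client{Transport: rt}
        dialer := spdy.NewDialer(upgrader, client, http.MethodPost, target)
        // Example protocol name; callers pass whatever the server supports.
        return dialer.Dial("v4.channel.k8s.io")
    }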
diff --git a/vendor/k8s.io/client-go/transport/transport.go b/vendor/k8s.io/client-go/transport/transport.go
index cd8de982..143ebfa5 100644
--- a/vendor/k8s.io/client-go/transport/transport.go
+++ b/vendor/k8s.io/client-go/transport/transport.go
@@ -23,6 +23,8 @@ import (
 	"fmt"
 	"io/ioutil"
 	"net/http"
+	"sync"
+	"time"
 
 	utilnet "k8s.io/apimachinery/pkg/util/net"
 	"k8s.io/klog"
@@ -81,7 +83,8 @@ func TLSConfigFor(c *Config) (*tls.Config, error) {
 	}
 
 	var staticCert *tls.Certificate
-	if c.HasCertAuth() {
+	// Treat cert as static if either key or cert was data, not a file
+	if c.HasCertAuth() && !c.TLS.ReloadTLSFiles {
 		// If key/cert were provided, verify them before setting up
 		// tlsConfig.GetClientCertificate.
 		cert, err := tls.X509KeyPair(c.TLS.CertData, c.TLS.KeyData)
@@ -91,6 +94,11 @@ func TLSConfigFor(c *Config) (*tls.Config, error) {
 		staticCert = &cert
 	}
 
+	var dynamicCertLoader func() (*tls.Certificate, error)
+	if c.TLS.ReloadTLSFiles {
+		dynamicCertLoader = cachingCertificateLoader(c.TLS.CertFile, c.TLS.KeyFile)
+	}
+
 	if c.HasCertAuth() || c.HasCertCallback() {
 		tlsConfig.GetClientCertificate = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
 			// Note: static key/cert data always take precedence over cert
@@ -98,6 +106,10 @@ func TLSConfigFor(c *Config) (*tls.Config, error) {
 			if staticCert != nil {
 				return staticCert, nil
 			}
+			// key/cert files lead to ReloadTLSFiles being set, which takes precedence over the cert callback
+			if dynamicCertLoader != nil {
+				return dynamicCertLoader()
+			}
 			if c.HasCertCallback() {
 				cert, err := c.TLS.GetCert()
 				if err != nil {
@@ -129,6 +141,11 @@ func loadTLSFiles(c *Config) error {
 		return err
 	}
 
+	// Check that we are purely loading from files
+	if len(c.TLS.CertFile) > 0 && len(c.TLS.CertData) == 0 && len(c.TLS.KeyFile) > 0 && len(c.TLS.KeyData) == 0 {
+		c.TLS.ReloadTLSFiles = true
+	}
+
 	c.TLS.CertData, err = dataFromSliceOrFile(c.TLS.CertData, c.TLS.CertFile)
 	if err != nil {
 		return err
@@ -243,3 +260,44 @@ func tryCancelRequest(rt http.RoundTripper, req *http.Request) {
 		klog.Warningf("Unable to cancel request for %T", rt)
 	}
 }
+
+type certificateCacheEntry struct {
+	cert  *tls.Certificate
+	err   error
+	birth time.Time
+}
+
+// isStale returns true when this cache entry is too old to be usable
+func (c *certificateCacheEntry) isStale() bool {
+	return time.Now().Sub(c.birth) > time.Second
+}
+
+func newCertificateCacheEntry(certFile, keyFile string) certificateCacheEntry {
+	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
+	return certificateCacheEntry{cert: &cert, err: err, birth: time.Now()}
+}
+
+// cachingCertificateLoader ensures that we don't hammer the filesystem when opening many connections.
+// The underlying cert files are read at most once every second.
+func cachingCertificateLoader(certFile, keyFile string) func() (*tls.Certificate, error) {
+	current := newCertificateCacheEntry(certFile, keyFile)
+	var currentMtx sync.RWMutex
+
+	return func() (*tls.Certificate, error) {
+		currentMtx.RLock()
+		if current.isStale() {
+			currentMtx.RUnlock()
+
+			currentMtx.Lock()
+			defer currentMtx.Unlock()
+
+			if current.isStale() {
+				current = newCertificateCacheEntry(certFile, keyFile)
+			}
+		} else {
+			defer currentMtx.RUnlock()
+		}
+
+		return current.cert, current.err
+	}
+}
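
Illustrative sketch (not part of this patch) of the configuration shape that exercises the new reload path: the client cert and key are given purely as file paths, so loadTLSFiles sets ReloadTLSFiles and TLSConfigFor installs the caching loader above. The host and file paths are assumptions.

    package example

    import (
        restclient "k8s.io/client-go/rest"
    )

    // newReloadingTransport builds a transport whose client cert/key are
    // re-read from disk (at most once per second) as connections are made.
    func newReloadingTransport() error {
        cfg := &restclient.Config{
            Host: "https://kubernetes.default.svc",
            TLSClientConfig: restclient.TLSClientConfig{
                CAFile:   "/etc/kubernetes/pki/ca.crt",
                CertFile: "/etc/kubernetes/pki/client.crt",
                KeyFile:  "/etc/kubernetes/pki/client.key",
            },
        }
        _, err := restclient.TransportFor(cfg)
        return err
    }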
diff --git a/vendor/k8s.io/client-go/util/connrotation/connrotation.go b/vendor/k8s.io/client-go/util/connrotation/connrotation.go
index 235a9e01..f98faee4 100644
--- a/vendor/k8s.io/client-go/util/connrotation/connrotation.go
+++ b/vendor/k8s.io/client-go/util/connrotation/connrotation.go
@@ -77,11 +77,6 @@ func (d *Dialer) DialContext(ctx context.Context, network, address string) (net.
 
 	closable := &closableConn{Conn: conn}
 
-	// Start tracking the connection
-	d.mu.Lock()
-	d.conns[closable] = struct{}{}
-	d.mu.Unlock()
-
 	// When the connection is closed, remove it from the map. This will
 	// be no-op if the connection isn't in the map, e.g. if CloseAll()
 	// is called.
@@ -91,6 +86,11 @@ func (d *Dialer) DialContext(ctx context.Context, network, address string) (net.
 		d.mu.Unlock()
 	}
 
+	// Start tracking the connection
+	d.mu.Lock()
+	d.conns[closable] = struct{}{}
+	d.mu.Unlock()
+
 	return closable, nil
 }
 
diff --git a/vendor/k8s.io/client-go/util/exec/exec.go b/vendor/k8s.io/client-go/util/exec/exec.go
new file mode 100644
index 00000000..d170badb
--- /dev/null
+++ b/vendor/k8s.io/client-go/util/exec/exec.go
@@ -0,0 +1,52 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package exec
+
+// ExitError is an interface that presents an API similar to os.ProcessState, which is
+// what ExitError from os/exec is.  This is designed to make testing a bit easier and
+// probably loses some of the cross-platform properties of the underlying library.
+type ExitError interface {
+	String() string
+	Error() string
+	Exited() bool
+	ExitStatus() int
+}
+
+// CodeExitError is an implementation of ExitError consisting of an error object
+// and an exit code (the upper bits of os.exec.ExitStatus).
+type CodeExitError struct {
+	Err  error
+	Code int
+}
+
+var _ ExitError = CodeExitError{}
+
+func (e CodeExitError) Error() string {
+	return e.Err.Error()
+}
+
+func (e CodeExitError) String() string {
+	return e.Err.Error()
+}
+
+func (e CodeExitError) Exited() bool {
+	return true
+}
+
+func (e CodeExitError) ExitStatus() int {
+	return e.Code
+}
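
Illustrative sketch (not part of this patch): reporting a failure through the ExitError interface defined above; the message and code are examples.

    package example

    import (
        "fmt"

        utilexec "k8s.io/client-go/util/exec"
    )

    func reportFailure() {
        var err utilexec.ExitError = utilexec.CodeExitError{
            Err:  fmt.Errorf("command terminated abnormally"),
            Code: 2,
        }
        fmt.Println(err.Exited(), err.ExitStatus()) // true 2
    }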
diff --git a/vendor/k8s.io/client-go/util/jsonpath/doc.go b/vendor/k8s.io/client-go/util/jsonpath/doc.go
new file mode 100644
index 00000000..0effb15c
--- /dev/null
+++ b/vendor/k8s.io/client-go/util/jsonpath/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package jsonpath is a template engine using jsonpath syntax,
+// which can be seen at http://goessner.net/articles/JsonPath/.
+// In addition, it has a {range} {end} function to iterate over lists and slices.
+package jsonpath // import "k8s.io/client-go/util/jsonpath"
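
Illustrative sketch (not part of this patch): evaluating a jsonpath template, including the {range} {end} form, against arbitrary data; the template and data layout are examples.

    package main

    import (
        "bytes"
        "fmt"

        "k8s.io/client-go/util/jsonpath"
    )

    func main() {
        data := map[string]interface{}{
            "items": []interface{}{
                map[string]interface{}{"name": "pod-a"},
                map[string]interface{}{"name": "pod-b"},
            },
        }
        jp := jsonpath.New("example").AllowMissingKeys(true)
        if err := jp.Parse("{range .items[*]}{.name} {end}"); err != nil {
            panic(err)
        }
        var out bytes.Buffer
        if err := jp.Execute(&out, data); err != nil {
            panic(err)
        }
        fmt.Println(out.String()) // prints: pod-a pod-b
    }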
diff --git a/vendor/k8s.io/client-go/util/jsonpath/jsonpath.go b/vendor/k8s.io/client-go/util/jsonpath/jsonpath.go
new file mode 100644
index 00000000..78b6b678
--- /dev/null
+++ b/vendor/k8s.io/client-go/util/jsonpath/jsonpath.go
@@ -0,0 +1,525 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package jsonpath
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"reflect"
+	"strings"
+
+	"k8s.io/client-go/third_party/forked/golang/template"
+)
+
+type JSONPath struct {
+	name       string
+	parser     *Parser
+	stack      [][]reflect.Value // push and pop values in different scopes
+	cur        []reflect.Value   // current scope values
+	beginRange int
+	inRange    int
+	endRange   int
+
+	allowMissingKeys bool
+}
+
+// New creates a new JSONPath with the given name.
+func New(name string) *JSONPath {
+	return &JSONPath{
+		name:       name,
+		beginRange: 0,
+		inRange:    0,
+		endRange:   0,
+	}
+}
+
+// AllowMissingKeys allows a caller to specify whether they want an error if a field or map key
+// cannot be located, or simply an empty result. The receiver is returned for chaining.
+func (j *JSONPath) AllowMissingKeys(allow bool) *JSONPath {
+	j.allowMissingKeys = allow
+	return j
+}
+
+// Parse parses the given template and returns an error.
+func (j *JSONPath) Parse(text string) error {
+	var err error
+	j.parser, err = Parse(j.name, text)
+	return err
+}
+
+// Execute binds data into the template and writes the result.
+func (j *JSONPath) Execute(wr io.Writer, data interface{}) error {
+	fullResults, err := j.FindResults(data)
+	if err != nil {
+		return err
+	}
+	for ix := range fullResults {
+		if err := j.PrintResults(wr, fullResults[ix]); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (j *JSONPath) FindResults(data interface{}) ([][]reflect.Value, error) {
+	if j.parser == nil {
+		return nil, fmt.Errorf("%s is an incomplete jsonpath template", j.name)
+	}
+
+	j.cur = []reflect.Value{reflect.ValueOf(data)}
+	nodes := j.parser.Root.Nodes
+	fullResult := [][]reflect.Value{}
+	for i := 0; i < len(nodes); i++ {
+		node := nodes[i]
+		results, err := j.walk(j.cur, node)
+		if err != nil {
+			return nil, err
+		}
+
+		// encounter an end node, break the current block
+		if j.endRange > 0 && j.endRange <= j.inRange {
+			j.endRange--
+			break
+		}
+		// encounter a range node, start a range loop
+		if j.beginRange > 0 {
+			j.beginRange--
+			j.inRange++
+			for k, value := range results {
+				j.parser.Root.Nodes = nodes[i+1:]
+				if k == len(results)-1 {
+					j.inRange--
+				}
+				nextResults, err := j.FindResults(value.Interface())
+				if err != nil {
+					return nil, err
+				}
+				fullResult = append(fullResult, nextResults...)
+			}
+			break
+		}
+		fullResult = append(fullResult, results)
+	}
+	return fullResult, nil
+}
+
+// PrintResults writes the results to the writer.
+func (j *JSONPath) PrintResults(wr io.Writer, results []reflect.Value) error {
+	for i, r := range results {
+		text, err := j.evalToText(r)
+		if err != nil {
+			return err
+		}
+		if i != len(results)-1 {
+			text = append(text, ' ')
+		}
+		if _, err = wr.Write(text); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// walk visits the tree rooted at the given node in DFS order.
+func (j *JSONPath) walk(value []reflect.Value, node Node) ([]reflect.Value, error) {
+	switch node := node.(type) {
+	case *ListNode:
+		return j.evalList(value, node)
+	case *TextNode:
+		return []reflect.Value{reflect.ValueOf(node.Text)}, nil
+	case *FieldNode:
+		return j.evalField(value, node)
+	case *ArrayNode:
+		return j.evalArray(value, node)
+	case *FilterNode:
+		return j.evalFilter(value, node)
+	case *IntNode:
+		return j.evalInt(value, node)
+	case *BoolNode:
+		return j.evalBool(value, node)
+	case *FloatNode:
+		return j.evalFloat(value, node)
+	case *WildcardNode:
+		return j.evalWildcard(value, node)
+	case *RecursiveNode:
+		return j.evalRecursive(value, node)
+	case *UnionNode:
+		return j.evalUnion(value, node)
+	case *IdentifierNode:
+		return j.evalIdentifier(value, node)
+	default:
+		return value, fmt.Errorf("unexpected Node %v", node)
+	}
+}
+
+// evalInt evaluates IntNode
+func (j *JSONPath) evalInt(input []reflect.Value, node *IntNode) ([]reflect.Value, error) {
+	result := make([]reflect.Value, len(input))
+	for i := range input {
+		result[i] = reflect.ValueOf(node.Value)
+	}
+	return result, nil
+}
+
+// evalFloat evaluates FloatNode
+func (j *JSONPath) evalFloat(input []reflect.Value, node *FloatNode) ([]reflect.Value, error) {
+	result := make([]reflect.Value, len(input))
+	for i := range input {
+		result[i] = reflect.ValueOf(node.Value)
+	}
+	return result, nil
+}
+
+// evalBool evaluates BoolNode
+func (j *JSONPath) evalBool(input []reflect.Value, node *BoolNode) ([]reflect.Value, error) {
+	result := make([]reflect.Value, len(input))
+	for i := range input {
+		result[i] = reflect.ValueOf(node.Value)
+	}
+	return result, nil
+}
+
+// evalList evaluates ListNode
+func (j *JSONPath) evalList(value []reflect.Value, node *ListNode) ([]reflect.Value, error) {
+	var err error
+	curValue := value
+	for _, node := range node.Nodes {
+		curValue, err = j.walk(curValue, node)
+		if err != nil {
+			return curValue, err
+		}
+	}
+	return curValue, nil
+}
+
+// evalIdentifier evaluates IdentifierNode
+func (j *JSONPath) evalIdentifier(input []reflect.Value, node *IdentifierNode) ([]reflect.Value, error) {
+	results := []reflect.Value{}
+	switch node.Name {
+	case "range":
+		j.stack = append(j.stack, j.cur)
+		j.beginRange++
+		results = input
+	case "end":
+		if j.endRange < j.inRange { // inside a loop, break the current block
+			j.endRange++
+			break
+		}
+		// the loop is about to end, pop value and continue the following execution
+		if len(j.stack) > 0 {
+			j.cur, j.stack = j.stack[len(j.stack)-1], j.stack[:len(j.stack)-1]
+		} else {
+			return results, fmt.Errorf("not in range, nothing to end")
+		}
+	default:
+		return input, fmt.Errorf("unrecognized identifier %v", node.Name)
+	}
+	return results, nil
+}
+
+// evalArray evaluates ArrayNode
+func (j *JSONPath) evalArray(input []reflect.Value, node *ArrayNode) ([]reflect.Value, error) {
+	result := []reflect.Value{}
+	for _, value := range input {
+
+		value, isNil := template.Indirect(value)
+		if isNil {
+			continue
+		}
+		if value.Kind() != reflect.Array && value.Kind() != reflect.Slice {
+			return input, fmt.Errorf("%v is not array or slice", value.Type())
+		}
+		params := node.Params
+		if !params[0].Known {
+			params[0].Value = 0
+		}
+		if params[0].Value < 0 {
+			params[0].Value += value.Len()
+		}
+		if !params[1].Known {
+			params[1].Value = value.Len()
+		}
+
+		if params[1].Value < 0 || (params[1].Value == 0 && params[1].Derived) {
+			params[1].Value += value.Len()
+		}
+		sliceLength := value.Len()
+		if params[1].Value != params[0].Value { // if you're requesting zero elements, allow it through.
+			if params[0].Value >= sliceLength || params[0].Value < 0 {
+				return input, fmt.Errorf("array index out of bounds: index %d, length %d", params[0].Value, sliceLength)
+			}
+			if params[1].Value > sliceLength || params[1].Value < 0 {
+				return input, fmt.Errorf("array index out of bounds: index %d, length %d", params[1].Value-1, sliceLength)
+			}
+			if params[0].Value > params[1].Value {
+				return input, fmt.Errorf("starting index %d is greater than ending index %d", params[0].Value, params[1].Value)
+			}
+		} else {
+			return result, nil
+		}
+
+		value = value.Slice(params[0].Value, params[1].Value)
+
+		step := 1
+		if params[2].Known {
+			if params[2].Value <= 0 {
+				return input, fmt.Errorf("step must be > 0")
+			}
+			step = params[2].Value
+		}
+		for i := 0; i < value.Len(); i += step {
+			result = append(result, value.Index(i))
+		}
+	}
+	return result, nil
+}
+
+// evalUnion evaluates UnionNode
+func (j *JSONPath) evalUnion(input []reflect.Value, node *UnionNode) ([]reflect.Value, error) {
+	result := []reflect.Value{}
+	for _, listNode := range node.Nodes {
+		temp, err := j.evalList(input, listNode)
+		if err != nil {
+			return input, err
+		}
+		result = append(result, temp...)
+	}
+	return result, nil
+}
+
+func (j *JSONPath) findFieldInValue(value *reflect.Value, node *FieldNode) (reflect.Value, error) {
+	t := value.Type()
+	var inlineValue *reflect.Value
+	for ix := 0; ix < t.NumField(); ix++ {
+		f := t.Field(ix)
+		jsonTag := f.Tag.Get("json")
+		parts := strings.Split(jsonTag, ",")
+		if len(parts) == 0 {
+			continue
+		}
+		if parts[0] == node.Value {
+			return value.Field(ix), nil
+		}
+		if len(parts[0]) == 0 {
+			val := value.Field(ix)
+			inlineValue = &val
+		}
+	}
+	if inlineValue != nil {
+		if inlineValue.Kind() == reflect.Struct {
+			// handle 'inline'
+			match, err := j.findFieldInValue(inlineValue, node)
+			if err != nil {
+				return reflect.Value{}, err
+			}
+			if match.IsValid() {
+				return match, nil
+			}
+		}
+	}
+	return value.FieldByName(node.Value), nil
+}
+
+// evalField evaluates a field of a struct or a key of a map.
+func (j *JSONPath) evalField(input []reflect.Value, node *FieldNode) ([]reflect.Value, error) {
+	results := []reflect.Value{}
+	// If there's no input, there's no output
+	if len(input) == 0 {
+		return results, nil
+	}
+	for _, value := range input {
+		var result reflect.Value
+		value, isNil := template.Indirect(value)
+		if isNil {
+			continue
+		}
+
+		if value.Kind() == reflect.Struct {
+			var err error
+			if result, err = j.findFieldInValue(&value, node); err != nil {
+				return nil, err
+			}
+		} else if value.Kind() == reflect.Map {
+			mapKeyType := value.Type().Key()
+			nodeValue := reflect.ValueOf(node.Value)
+			// node value type must be convertible to map key type
+			if !nodeValue.Type().ConvertibleTo(mapKeyType) {
+				return results, fmt.Errorf("%s is not convertible to %s", nodeValue, mapKeyType)
+			}
+			result = value.MapIndex(nodeValue.Convert(mapKeyType))
+		}
+		if result.IsValid() {
+			results = append(results, result)
+		}
+	}
+	if len(results) == 0 {
+		if j.allowMissingKeys {
+			return results, nil
+		}
+		return results, fmt.Errorf("%s is not found", node.Value)
+	}
+	return results, nil
+}
+
+// evalWildcard extracts all contents of the given value
+func (j *JSONPath) evalWildcard(input []reflect.Value, node *WildcardNode) ([]reflect.Value, error) {
+	results := []reflect.Value{}
+	for _, value := range input {
+		value, isNil := template.Indirect(value)
+		if isNil {
+			continue
+		}
+
+		kind := value.Kind()
+		if kind == reflect.Struct {
+			for i := 0; i < value.NumField(); i++ {
+				results = append(results, value.Field(i))
+			}
+		} else if kind == reflect.Map {
+			for _, key := range value.MapKeys() {
+				results = append(results, value.MapIndex(key))
+			}
+		} else if kind == reflect.Array || kind == reflect.Slice || kind == reflect.String {
+			for i := 0; i < value.Len(); i++ {
+				results = append(results, value.Index(i))
+			}
+		}
+	}
+	return results, nil
+}
+
+// evalRecursive visits the given values recursively and appends all of them to the result.
+func (j *JSONPath) evalRecursive(input []reflect.Value, node *RecursiveNode) ([]reflect.Value, error) {
+	result := []reflect.Value{}
+	for _, value := range input {
+		results := []reflect.Value{}
+		value, isNil := template.Indirect(value)
+		if isNil {
+			continue
+		}
+
+		kind := value.Kind()
+		if kind == reflect.Struct {
+			for i := 0; i < value.NumField(); i++ {
+				results = append(results, value.Field(i))
+			}
+		} else if kind == reflect.Map {
+			for _, key := range value.MapKeys() {
+				results = append(results, value.MapIndex(key))
+			}
+		} else if kind == reflect.Array || kind == reflect.Slice || kind == reflect.String {
+			for i := 0; i < value.Len(); i++ {
+				results = append(results, value.Index(i))
+			}
+		}
+		if len(results) != 0 {
+			result = append(result, value)
+			output, err := j.evalRecursive(results, node)
+			if err != nil {
+				return result, err
+			}
+			result = append(result, output...)
+		}
+	}
+	return result, nil
+}
+
+// evalFilter filters array according to FilterNode
+func (j *JSONPath) evalFilter(input []reflect.Value, node *FilterNode) ([]reflect.Value, error) {
+	results := []reflect.Value{}
+	for _, value := range input {
+		value, _ = template.Indirect(value)
+
+		if value.Kind() != reflect.Array && value.Kind() != reflect.Slice {
+			return input, fmt.Errorf("%v is not array or slice and cannot be filtered", value)
+		}
+		for i := 0; i < value.Len(); i++ {
+			temp := []reflect.Value{value.Index(i)}
+			lefts, err := j.evalList(temp, node.Left)
+
+			//case exists
+			if node.Operator == "exists" {
+				if len(lefts) > 0 {
+					results = append(results, value.Index(i))
+				}
+				continue
+			}
+
+			if err != nil {
+				return input, err
+			}
+
+			var left, right interface{}
+			switch {
+			case len(lefts) == 0:
+				continue
+			case len(lefts) > 1:
+				return input, fmt.Errorf("can only compare one element at a time")
+			}
+			left = lefts[0].Interface()
+
+			rights, err := j.evalList(temp, node.Right)
+			if err != nil {
+				return input, err
+			}
+			switch {
+			case len(rights) == 0:
+				continue
+			case len(rights) > 1:
+				return input, fmt.Errorf("can only compare one element at a time")
+			}
+			right = rights[0].Interface()
+
+			pass := false
+			switch node.Operator {
+			case "<":
+				pass, err = template.Less(left, right)
+			case ">":
+				pass, err = template.Greater(left, right)
+			case "==":
+				pass, err = template.Equal(left, right)
+			case "!=":
+				pass, err = template.NotEqual(left, right)
+			case "<=":
+				pass, err = template.LessEqual(left, right)
+			case ">=":
+				pass, err = template.GreaterEqual(left, right)
+			default:
+				return results, fmt.Errorf("unrecognized filter operator %s", node.Operator)
+			}
+			if err != nil {
+				return results, err
+			}
+			if pass {
+				results = append(results, value.Index(i))
+			}
+		}
+	}
+	return results, nil
+}
+
+// evalToText translates reflect value to corresponding text
+func (j *JSONPath) evalToText(v reflect.Value) ([]byte, error) {
+	iface, ok := template.PrintableValue(v)
+	if !ok {
+		return nil, fmt.Errorf("can't print type %s", v.Type())
+	}
+	var buffer bytes.Buffer
+	fmt.Fprint(&buffer, iface)
+	return buffer.Bytes(), nil
+}
diff --git a/vendor/k8s.io/client-go/util/jsonpath/node.go b/vendor/k8s.io/client-go/util/jsonpath/node.go
new file mode 100644
index 00000000..83abe8b0
--- /dev/null
+++ b/vendor/k8s.io/client-go/util/jsonpath/node.go
@@ -0,0 +1,256 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package jsonpath
+
+import "fmt"
+
+// NodeType identifies the type of a parse tree node.
+type NodeType int
+
+// Type returns itself and provides an easy default implementation
+func (t NodeType) Type() NodeType {
+	return t
+}
+
+func (t NodeType) String() string {
+	return NodeTypeName[t]
+}
+
+const (
+	NodeText NodeType = iota
+	NodeArray
+	NodeList
+	NodeField
+	NodeIdentifier
+	NodeFilter
+	NodeInt
+	NodeFloat
+	NodeWildcard
+	NodeRecursive
+	NodeUnion
+	NodeBool
+)
+
+var NodeTypeName = map[NodeType]string{
+	NodeText:       "NodeText",
+	NodeArray:      "NodeArray",
+	NodeList:       "NodeList",
+	NodeField:      "NodeField",
+	NodeIdentifier: "NodeIdentifier",
+	NodeFilter:     "NodeFilter",
+	NodeInt:        "NodeInt",
+	NodeFloat:      "NodeFloat",
+	NodeWildcard:   "NodeWildcard",
+	NodeRecursive:  "NodeRecursive",
+	NodeUnion:      "NodeUnion",
+	NodeBool:       "NodeBool",
+}
+
+type Node interface {
+	Type() NodeType
+	String() string
+}
+
+// ListNode holds a sequence of nodes.
+type ListNode struct {
+	NodeType
+	Nodes []Node // The element nodes in lexical order.
+}
+
+func newList() *ListNode {
+	return &ListNode{NodeType: NodeList}
+}
+
+func (l *ListNode) append(n Node) {
+	l.Nodes = append(l.Nodes, n)
+}
+
+func (l *ListNode) String() string {
+	return l.Type().String()
+}
+
+// TextNode holds plain text.
+type TextNode struct {
+	NodeType
+	Text string // The text; may span newlines.
+}
+
+func newText(text string) *TextNode {
+	return &TextNode{NodeType: NodeText, Text: text}
+}
+
+func (t *TextNode) String() string {
+	return fmt.Sprintf("%s: %s", t.Type(), t.Text)
+}
+
+// FieldNode holds field of struct
+type FieldNode struct {
+	NodeType
+	Value string
+}
+
+func newField(value string) *FieldNode {
+	return &FieldNode{NodeType: NodeField, Value: value}
+}
+
+func (f *FieldNode) String() string {
+	return fmt.Sprintf("%s: %s", f.Type(), f.Value)
+}
+
+// IdentifierNode holds an identifier
+type IdentifierNode struct {
+	NodeType
+	Name string
+}
+
+func newIdentifier(value string) *IdentifierNode {
+	return &IdentifierNode{
+		NodeType: NodeIdentifier,
+		Name:     value,
+	}
+}
+
+func (f *IdentifierNode) String() string {
+	return fmt.Sprintf("%s: %s", f.Type(), f.Name)
+}
+
+// ParamsEntry holds param information for ArrayNode
+type ParamsEntry struct {
+	Value   int
+	Known   bool // whether the value is known when it is parsed
+	Derived bool
+}
+
+// ArrayNode holds start, end, step information for array index selection
+type ArrayNode struct {
+	NodeType
+	Params [3]ParamsEntry // start, end, step
+}
+
+func newArray(params [3]ParamsEntry) *ArrayNode {
+	return &ArrayNode{
+		NodeType: NodeArray,
+		Params:   params,
+	}
+}
+
+func (a *ArrayNode) String() string {
+	return fmt.Sprintf("%s: %v", a.Type(), a.Params)
+}
+
+// FilterNode holds operand and operator information for filter
+type FilterNode struct {
+	NodeType
+	Left     *ListNode
+	Right    *ListNode
+	Operator string
+}
+
+func newFilter(left, right *ListNode, operator string) *FilterNode {
+	return &FilterNode{
+		NodeType: NodeFilter,
+		Left:     left,
+		Right:    right,
+		Operator: operator,
+	}
+}
+
+func (f *FilterNode) String() string {
+	return fmt.Sprintf("%s: %s %s %s", f.Type(), f.Left, f.Operator, f.Right)
+}
+
+// IntNode holds integer value
+type IntNode struct {
+	NodeType
+	Value int
+}
+
+func newInt(num int) *IntNode {
+	return &IntNode{NodeType: NodeInt, Value: num}
+}
+
+func (i *IntNode) String() string {
+	return fmt.Sprintf("%s: %d", i.Type(), i.Value)
+}
+
+// FloatNode holds float value
+type FloatNode struct {
+	NodeType
+	Value float64
+}
+
+func newFloat(num float64) *FloatNode {
+	return &FloatNode{NodeType: NodeFloat, Value: num}
+}
+
+func (i *FloatNode) String() string {
+	return fmt.Sprintf("%s: %f", i.Type(), i.Value)
+}
+
+// WildcardNode means a wildcard
+type WildcardNode struct {
+	NodeType
+}
+
+func newWildcard() *WildcardNode {
+	return &WildcardNode{NodeType: NodeWildcard}
+}
+
+func (i *WildcardNode) String() string {
+	return i.Type().String()
+}
+
+// RecursiveNode means a recursive descent operator
+type RecursiveNode struct {
+	NodeType
+}
+
+func newRecursive() *RecursiveNode {
+	return &RecursiveNode{NodeType: NodeRecursive}
+}
+
+func (r *RecursiveNode) String() string {
+	return r.Type().String()
+}
+
+// UnionNode is union of ListNode
+type UnionNode struct {
+	NodeType
+	Nodes []*ListNode
+}
+
+func newUnion(nodes []*ListNode) *UnionNode {
+	return &UnionNode{NodeType: NodeUnion, Nodes: nodes}
+}
+
+func (u *UnionNode) String() string {
+	return u.Type().String()
+}
+
+// BoolNode holds bool value
+type BoolNode struct {
+	NodeType
+	Value bool
+}
+
+func newBool(value bool) *BoolNode {
+	return &BoolNode{NodeType: NodeBool, Value: value}
+}
+
+func (b *BoolNode) String() string {
+	return fmt.Sprintf("%s: %t", b.Type(), b.Value)
+}
diff --git a/vendor/k8s.io/client-go/util/jsonpath/parser.go b/vendor/k8s.io/client-go/util/jsonpath/parser.go
new file mode 100644
index 00000000..e1aab680
--- /dev/null
+++ b/vendor/k8s.io/client-go/util/jsonpath/parser.go
@@ -0,0 +1,524 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package jsonpath
+
+import (
+	"errors"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+)
+
+const eof = -1
+
+const (
+	leftDelim  = "{"
+	rightDelim = "}"
+)
+
+type Parser struct {
+	Name  string
+	Root  *ListNode
+	input string
+	pos   int
+	start int
+	width int
+}
+
+var (
+	ErrSyntax        = errors.New("invalid syntax")
+	dictKeyRex       = regexp.MustCompile(`^'([^']*)'$`)
+	sliceOperatorRex = regexp.MustCompile(`^(-?[\d]*)(:-?[\d]*)?(:-?[\d]*)?$`)
+)
+
+// Parse parses the given text and returns a node Parser.
+// If an error is encountered, parsing stops and a nil
+// Parser is returned along with the error.
+func Parse(name, text string) (*Parser, error) {
+	p := NewParser(name)
+	err := p.Parse(text)
+	if err != nil {
+		p = nil
+	}
+	return p, err
+}
+
+func NewParser(name string) *Parser {
+	return &Parser{
+		Name: name,
+	}
+}
+
+// parseAction parses the expression inside the delimiters
+func parseAction(name, text string) (*Parser, error) {
+	p, err := Parse(name, fmt.Sprintf("%s%s%s", leftDelim, text, rightDelim))
+	// when an error happens, p will be nil, so we need to return here
+	if err != nil {
+		return p, err
+	}
+	p.Root = p.Root.Nodes[0].(*ListNode)
+	return p, nil
+}
+
+func (p *Parser) Parse(text string) error {
+	p.input = text
+	p.Root = newList()
+	p.pos = 0
+	return p.parseText(p.Root)
+}
+
+// consumeText returns the parsed text since the last consumeText call
+func (p *Parser) consumeText() string {
+	value := p.input[p.start:p.pos]
+	p.start = p.pos
+	return value
+}
+
+// next returns the next rune in the input.
+func (p *Parser) next() rune {
+	if p.pos >= len(p.input) {
+		p.width = 0
+		return eof
+	}
+	r, w := utf8.DecodeRuneInString(p.input[p.pos:])
+	p.width = w
+	p.pos += p.width
+	return r
+}
+
+// peek returns but does not consume the next rune in the input.
+func (p *Parser) peek() rune {
+	r := p.next()
+	p.backup()
+	return r
+}
+
+// backup steps back one rune. Can only be called once per call of next.
+func (p *Parser) backup() {
+	p.pos -= p.width
+}
+
+func (p *Parser) parseText(cur *ListNode) error {
+	for {
+		if strings.HasPrefix(p.input[p.pos:], leftDelim) {
+			if p.pos > p.start {
+				cur.append(newText(p.consumeText()))
+			}
+			return p.parseLeftDelim(cur)
+		}
+		if p.next() == eof {
+			break
+		}
+	}
+	// Correctly reached EOF.
+	if p.pos > p.start {
+		cur.append(newText(p.consumeText()))
+	}
+	return nil
+}
+
+// parseLeftDelim scans the left delimiter, which is known to be present.
+func (p *Parser) parseLeftDelim(cur *ListNode) error {
+	p.pos += len(leftDelim)
+	p.consumeText()
+	newNode := newList()
+	cur.append(newNode)
+	cur = newNode
+	return p.parseInsideAction(cur)
+}
+
+func (p *Parser) parseInsideAction(cur *ListNode) error {
+	prefixMap := map[string]func(*ListNode) error{
+		rightDelim: p.parseRightDelim,
+		"[?(":      p.parseFilter,
+		"..":       p.parseRecursive,
+	}
+	for prefix, parseFunc := range prefixMap {
+		if strings.HasPrefix(p.input[p.pos:], prefix) {
+			return parseFunc(cur)
+		}
+	}
+
+	switch r := p.next(); {
+	case r == eof || isEndOfLine(r):
+		return fmt.Errorf("unclosed action")
+	case r == ' ':
+		p.consumeText()
+	case r == '@' || r == '$': //the current object, just pass it
+		p.consumeText()
+	case r == '[':
+		return p.parseArray(cur)
+	case r == '"' || r == '\'':
+		return p.parseQuote(cur, r)
+	case r == '.':
+		return p.parseField(cur)
+	case r == '+' || r == '-' || unicode.IsDigit(r):
+		p.backup()
+		return p.parseNumber(cur)
+	case isAlphaNumeric(r):
+		p.backup()
+		return p.parseIdentifier(cur)
+	default:
+		return fmt.Errorf("unrecognized character in action: %#U", r)
+	}
+	return p.parseInsideAction(cur)
+}
+
+// parseRightDelim scans the right delimiter, which is known to be present.
+func (p *Parser) parseRightDelim(cur *ListNode) error {
+	p.pos += len(rightDelim)
+	p.consumeText()
+	return p.parseText(p.Root)
+}
+
+// parseIdentifier scans built-in keywords, like "range" and "end"
+func (p *Parser) parseIdentifier(cur *ListNode) error {
+	var r rune
+	for {
+		r = p.next()
+		if isTerminator(r) {
+			p.backup()
+			break
+		}
+	}
+	value := p.consumeText()
+
+	if isBool(value) {
+		v, err := strconv.ParseBool(value)
+		if err != nil {
+			return fmt.Errorf("can not parse bool '%s': %s", value, err.Error())
+		}
+
+		cur.append(newBool(v))
+	} else {
+		cur.append(newIdentifier(value))
+	}
+
+	return p.parseInsideAction(cur)
+}
+
+// parseRecursive scans the recursive descent operator ..
+func (p *Parser) parseRecursive(cur *ListNode) error {
+	p.pos += len("..")
+	p.consumeText()
+	cur.append(newRecursive())
+	if r := p.peek(); isAlphaNumeric(r) {
+		return p.parseField(cur)
+	}
+	return p.parseInsideAction(cur)
+}
+
+// parseNumber scans a number
+func (p *Parser) parseNumber(cur *ListNode) error {
+	r := p.peek()
+	if r == '+' || r == '-' {
+		p.next()
+	}
+	for {
+		r = p.next()
+		if r != '.' && !unicode.IsDigit(r) {
+			p.backup()
+			break
+		}
+	}
+	value := p.consumeText()
+	i, err := strconv.Atoi(value)
+	if err == nil {
+		cur.append(newInt(i))
+		return p.parseInsideAction(cur)
+	}
+	d, err := strconv.ParseFloat(value, 64)
+	if err == nil {
+		cur.append(newFloat(d))
+		return p.parseInsideAction(cur)
+	}
+	return fmt.Errorf("cannot parse number %s", value)
+}
+
+// parseArray scans array index selection
+func (p *Parser) parseArray(cur *ListNode) error {
+Loop:
+	for {
+		switch p.next() {
+		case eof, '\n':
+			return fmt.Errorf("unterminated array")
+		case ']':
+			break Loop
+		}
+	}
+	text := p.consumeText()
+	text = text[1 : len(text)-1]
+	if text == "*" {
+		text = ":"
+	}
+
+	//union operator
+	strs := strings.Split(text, ",")
+	if len(strs) > 1 {
+		union := []*ListNode{}
+		for _, str := range strs {
+			parser, err := parseAction("union", fmt.Sprintf("[%s]", strings.Trim(str, " ")))
+			if err != nil {
+				return err
+			}
+			union = append(union, parser.Root)
+		}
+		cur.append(newUnion(union))
+		return p.parseInsideAction(cur)
+	}
+
+	// dict key
+	value := dictKeyRex.FindStringSubmatch(text)
+	if value != nil {
+		parser, err := parseAction("arraydict", fmt.Sprintf(".%s", value[1]))
+		if err != nil {
+			return err
+		}
+		for _, node := range parser.Root.Nodes {
+			cur.append(node)
+		}
+		return p.parseInsideAction(cur)
+	}
+
+	//slice operator
+	value = sliceOperatorRex.FindStringSubmatch(text)
+	if value == nil {
+		return fmt.Errorf("invalid array index %s", text)
+	}
+	value = value[1:]
+	params := [3]ParamsEntry{}
+	for i := 0; i < 3; i++ {
+		if value[i] != "" {
+			if i > 0 {
+				value[i] = value[i][1:]
+			}
+			if i > 0 && value[i] == "" {
+				params[i].Known = false
+			} else {
+				var err error
+				params[i].Known = true
+				params[i].Value, err = strconv.Atoi(value[i])
+				if err != nil {
+					return fmt.Errorf("array index %s is not a number", value[i])
+				}
+			}
+		} else {
+			if i == 1 {
+				params[i].Known = true
+				params[i].Value = params[0].Value + 1
+				params[i].Derived = true
+			} else {
+				params[i].Known = false
+				params[i].Value = 0
+			}
+		}
+	}
+	cur.append(newArray(params))
+	return p.parseInsideAction(cur)
+}
+
+// parseFilter scans a filter inside an array selection
+func (p *Parser) parseFilter(cur *ListNode) error {
+	p.pos += len("[?(")
+	p.consumeText()
+	begin := false
+	end := false
+	var pair rune
+
+Loop:
+	for {
+		r := p.next()
+		switch r {
+		case eof, '\n':
+			return fmt.Errorf("unterminated filter")
+		case '"', '\'':
+			if begin == false {
+				//save the paired rune
+				begin = true
+				pair = r
+				continue
+			}
+			// only mark the end when the paired rune is met
+			if p.input[p.pos-2] != '\\' && r == pair {
+				end = true
+			}
+		case ')':
+			// in the right parser below, quotes appear zero times or once
+			// and must be paired at the beginning and end
+			if begin == end {
+				break Loop
+			}
+		}
+	}
+	if p.next() != ']' {
+		return fmt.Errorf("unclosed array expect ]")
+	}
+	reg := regexp.MustCompile(`^([^!<>=]+)([!<>=]+)(.+?)$`)
+	text := p.consumeText()
+	text = text[:len(text)-2]
+	value := reg.FindStringSubmatch(text)
+	if value == nil {
+		parser, err := parseAction("text", text)
+		if err != nil {
+			return err
+		}
+		cur.append(newFilter(parser.Root, newList(), "exists"))
+	} else {
+		leftParser, err := parseAction("left", value[1])
+		if err != nil {
+			return err
+		}
+		rightParser, err := parseAction("right", value[3])
+		if err != nil {
+			return err
+		}
+		cur.append(newFilter(leftParser.Root, rightParser.Root, value[2]))
+	}
+	return p.parseInsideAction(cur)
+}
+
+// parseQuote unquotes a string inside double or single quotes
+func (p *Parser) parseQuote(cur *ListNode, end rune) error {
+Loop:
+	for {
+		switch p.next() {
+		case eof, '\n':
+			return fmt.Errorf("unterminated quoted string")
+		case end:
+			// if it's not escaped, break the Loop
+			if p.input[p.pos-2] != '\\' {
+				break Loop
+			}
+		}
+	}
+	value := p.consumeText()
+	s, err := UnquoteExtend(value)
+	if err != nil {
+		return fmt.Errorf("unquote string %s error %v", value, err)
+	}
+	cur.append(newText(s))
+	return p.parseInsideAction(cur)
+}
+
+// parseField scans a field until a terminator
+func (p *Parser) parseField(cur *ListNode) error {
+	p.consumeText()
+	for p.advance() {
+	}
+	value := p.consumeText()
+	if value == "*" {
+		cur.append(newWildcard())
+	} else {
+		cur.append(newField(strings.Replace(value, "\\", "", -1)))
+	}
+	return p.parseInsideAction(cur)
+}
+
+// advance scans until next non-escaped terminator
+func (p *Parser) advance() bool {
+	r := p.next()
+	if r == '\\' {
+		p.next()
+	} else if isTerminator(r) {
+		p.backup()
+		return false
+	}
+	return true
+}
+
+// isTerminator reports whether the input is at a valid termination character to appear after an identifier.
+func isTerminator(r rune) bool {
+	if isSpace(r) || isEndOfLine(r) {
+		return true
+	}
+	switch r {
+	case eof, '.', ',', '[', ']', '$', '@', '{', '}':
+		return true
+	}
+	return false
+}
+
+// isSpace reports whether r is a space character.
+func isSpace(r rune) bool {
+	return r == ' ' || r == '\t'
+}
+
+// isEndOfLine reports whether r is an end-of-line character.
+func isEndOfLine(r rune) bool {
+	return r == '\r' || r == '\n'
+}
+
+// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
+func isAlphaNumeric(r rune) bool {
+	return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)
+}
+
+// isBool reports whether s is a boolean value.
+func isBool(s string) bool {
+	return s == "true" || s == "false"
+}
+
+// UnquoteExtend is almost the same as strconv.Unquote(), but it supports parsing single-quoted strings.
+func UnquoteExtend(s string) (string, error) {
+	n := len(s)
+	if n < 2 {
+		return "", ErrSyntax
+	}
+	quote := s[0]
+	if quote != s[n-1] {
+		return "", ErrSyntax
+	}
+	s = s[1 : n-1]
+
+	if quote != '"' && quote != '\'' {
+		return "", ErrSyntax
+	}
+
+	// Is it trivial?  Avoid allocation.
+	if !contains(s, '\\') && !contains(s, quote) {
+		return s, nil
+	}
+
+	var runeTmp [utf8.UTFMax]byte
+	buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations.
+	for len(s) > 0 {
+		c, multibyte, ss, err := strconv.UnquoteChar(s, quote)
+		if err != nil {
+			return "", err
+		}
+		s = ss
+		if c < utf8.RuneSelf || !multibyte {
+			buf = append(buf, byte(c))
+		} else {
+			n := utf8.EncodeRune(runeTmp[:], c)
+			buf = append(buf, runeTmp[:n]...)
+		}
+	}
+	return string(buf), nil
+}
+
+func contains(s string, c byte) bool {
+	for i := 0; i < len(s); i++ {
+		if s[i] == c {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/k8s.io/client-go/util/retry/util.go b/vendor/k8s.io/client-go/util/retry/util.go
deleted file mode 100644
index 15e2722f..00000000
--- a/vendor/k8s.io/client-go/util/retry/util.go
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package retry
-
-import (
-	"time"
-
-	"k8s.io/apimachinery/pkg/api/errors"
-	"k8s.io/apimachinery/pkg/util/wait"
-)
-
-// DefaultRetry is the recommended retry for a conflict where multiple clients
-// are making changes to the same resource.
-var DefaultRetry = wait.Backoff{
-	Steps:    5,
-	Duration: 10 * time.Millisecond,
-	Factor:   1.0,
-	Jitter:   0.1,
-}
-
-// DefaultBackoff is the recommended backoff for a conflict where a client
-// may be attempting to make an unrelated modification to a resource under
-// active management by one or more controllers.
-var DefaultBackoff = wait.Backoff{
-	Steps:    4,
-	Duration: 10 * time.Millisecond,
-	Factor:   5.0,
-	Jitter:   0.1,
-}
-
-// OnError allows the caller to retry fn in case the error returned by fn is retriable
-// according to the provided function. backoff defines the maximum retries and the wait
-// interval between two retries.
-func OnError(backoff wait.Backoff, retriable func(error) bool, fn func() error) error {
-	var lastErr error
-	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
-		err := fn()
-		switch {
-		case err == nil:
-			return true, nil
-		case retriable(err):
-			lastErr = err
-			return false, nil
-		default:
-			return false, err
-		}
-	})
-	if err == wait.ErrWaitTimeout {
-		err = lastErr
-	}
-	return err
-}
-
-// RetryOnConflict is used to make an update to a resource when you have to worry about
-// conflicts caused by other code making unrelated updates to the resource at the same
-// time. fn should fetch the resource to be modified, make appropriate changes to it, try
-// to update it, and return (unmodified) the error from the update function. On a
-// successful update, RetryOnConflict will return nil. If the update function returns a
-// "Conflict" error, RetryOnConflict will wait some amount of time as described by
-// backoff, and then try again. On a non-"Conflict" error, or if it retries too many times
-// and gives up, RetryOnConflict will return an error to the caller.
-//
-//     err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
-//         // Fetch the resource here; you need to refetch it on every try, since
-//         // if you got a conflict on the last update attempt then you need to get
-//         // the current version before making your own changes.
-//         pod, err := c.Pods("mynamespace").Get(name, metav1.GetOptions{})
-//         if err ! nil {
-//             return err
-//         }
-//
-//         // Make whatever updates to the resource are needed
-//         pod.Status.Phase = v1.PodFailed
-//
-//         // Try to update
-//         _, err = c.Pods("mynamespace").UpdateStatus(pod)
-//         // You have to return err itself here (not wrapped inside another error)
-//         // so that RetryOnConflict can identify it correctly.
-//         return err
-//     })
-//     if err != nil {
-//         // May be conflict if max retries were hit, or may be something unrelated
-//         // like permissions or a network error
-//         return err
-//     }
-//     ...
-//
-// TODO: Make Backoff an interface?
-func RetryOnConflict(backoff wait.Backoff, fn func() error) error {
-	return OnError(backoff, errors.IsConflict, fn)
-}
diff --git a/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go b/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go
index 71bb6322..6dc8ec5f 100644
--- a/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go
+++ b/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go
@@ -62,6 +62,54 @@ func (r *BucketRateLimiter) NumRequeues(item interface{}) int {
 func (r *BucketRateLimiter) Forget(item interface{}) {
 }
 
+// ItemBucketRateLimiter implements a workqueue rate limiter API using the standard rate.Limiter.
+// Each key uses a separate limiter.
+type ItemBucketRateLimiter struct {
+	r     rate.Limit
+	burst int
+
+	limitersLock sync.Mutex
+	limiters     map[interface{}]*rate.Limiter
+}
+
+var _ RateLimiter = &ItemBucketRateLimiter{}
+
+// NewItemBucketRateLimiter creates a new ItemBucketRateLimiter instance.
+func NewItemBucketRateLimiter(r rate.Limit, burst int) *ItemBucketRateLimiter {
+	return &ItemBucketRateLimiter{
+		r:        r,
+		burst:    burst,
+		limiters: make(map[interface{}]*rate.Limiter),
+	}
+}
+
+// When returns the time.Duration to wait before the item is processed.
+func (r *ItemBucketRateLimiter) When(item interface{}) time.Duration {
+	r.limitersLock.Lock()
+	defer r.limitersLock.Unlock()
+
+	limiter, ok := r.limiters[item]
+	if !ok {
+		limiter = rate.NewLimiter(r.r, r.burst)
+		r.limiters[item] = limiter
+	}
+
+	return limiter.Reserve().Delay()
+}
+
+// NumRequeues always returns 0 (requeues do not apply to ItemBucketRateLimiter).
+func (r *ItemBucketRateLimiter) NumRequeues(item interface{}) int {
+	return 0
+}
+
+// Forget removes the item from the internal state.
+func (r *ItemBucketRateLimiter) Forget(item interface{}) {
+	r.limitersLock.Lock()
+	defer r.limitersLock.Unlock()
+
+	delete(r.limiters, item)
+}
+
 // ItemExponentialFailureRateLimiter does a simple baseDelay*2^<num-failures> limit
 // dealing with max failures and expiration are up to the caller
 type ItemExponentialFailureRateLimiter struct {
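
Illustrative sketch (not part of this patch): plugging the new ItemBucketRateLimiter into a rate-limiting workqueue; the 10 QPS and burst 100 values are examples.

    package example

    import (
        "golang.org/x/time/rate"
        "k8s.io/client-go/util/workqueue"
    )

    // newPerItemQueue returns a workqueue where every key gets its own
    // token bucket instead of sharing one global bucket.
    func newPerItemQueue() workqueue.RateLimitingInterface {
        limiter := workqueue.NewItemBucketRateLimiter(rate.Limit(10), 100)
        return workqueue.NewRateLimitingQueue(limiter)
    }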
diff --git a/vendor/k8s.io/client-go/util/workqueue/metrics.go b/vendor/k8s.io/client-go/util/workqueue/metrics.go
index a3911bf2..556e6432 100644
--- a/vendor/k8s.io/client-go/util/workqueue/metrics.go
+++ b/vendor/k8s.io/client-go/util/workqueue/metrics.go
@@ -131,16 +131,14 @@ func (m *defaultQueueMetrics) updateUnfinishedWork() {
 	var total float64
 	var oldest float64
 	for _, t := range m.processingStartTimes {
-		age := m.sinceInMicroseconds(t)
+		age := m.sinceInSeconds(t)
 		total += age
 		if age > oldest {
 			oldest = age
 		}
 	}
-	// Convert to seconds; microseconds is unhelpfully granular for this.
-	total /= 1000000
 	m.unfinishedWorkSeconds.Set(total)
-	m.longestRunningProcessor.Set(oldest / 1000000)
+	m.longestRunningProcessor.Set(oldest)
 }
 
 type noMetrics struct{}
@@ -150,11 +148,6 @@ func (noMetrics) get(item t)            {}
 func (noMetrics) done(item t)           {}
 func (noMetrics) updateUnfinishedWork() {}
 
-// Gets the time since the specified start in microseconds.
-func (m *defaultQueueMetrics) sinceInMicroseconds(start time.Time) float64 {
-	return float64(m.clock.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds())
-}
-
 // Gets the time since the specified start in seconds.
 func (m *defaultQueueMetrics) sinceInSeconds(start time.Time) float64 {
 	return m.clock.Since(start).Seconds()
diff --git a/vendor/k8s.io/component-base/LICENSE b/vendor/k8s.io/component-base/LICENSE
new file mode 100644
index 00000000..d6456956
--- /dev/null
+++ b/vendor/k8s.io/component-base/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/k8s.io/component-base/version/.gitattributes b/vendor/k8s.io/component-base/version/.gitattributes
new file mode 100644
index 00000000..7e349eff
--- /dev/null
+++ b/vendor/k8s.io/component-base/version/.gitattributes
@@ -0,0 +1 @@
+base.go export-subst
diff --git a/vendor/k8s.io/component-base/version/base.go b/vendor/k8s.io/component-base/version/base.go
new file mode 100644
index 00000000..e13678c3
--- /dev/null
+++ b/vendor/k8s.io/component-base/version/base.go
@@ -0,0 +1,63 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package version
+
+// Base version information.
+//
+// This is the fallback data used when version information from git is not
+// provided via go ldflags. It provides an approximation of the Kubernetes
+// version for ad-hoc builds (e.g. `go build`) that cannot get the version
+// information from git.
+//
+// If you are looking at these fields in the git tree, they look
+// strange. They are modified on the fly by the build process. The
+// in-tree values are dummy values used for "git archive", which also
+// works for GitHub tar downloads.
+//
+// When releasing a new Kubernetes version, this file is updated by
+// build/mark_new_version.sh to reflect the new version, and then a
+// git annotated tag (using format vX.Y where X == Major version and Y
+// == Minor version) is created to point to the commit that updates
+// component-base/version/base.go
+var (
+	// TODO: Deprecate gitMajor and gitMinor, use only gitVersion
+	// instead. First step in deprecation, keep the fields but make
+	// them irrelevant. (Next we'll take it out, which may muck with
+	// scripts consuming the kubectl version output - but most of
+	// these should be looking at gitVersion already anyways.)
+	gitMajor string // major version, always numeric
+	gitMinor string // minor version, numeric possibly followed by "+"
+
+	// semantic version, derived by build scripts (see
+	// https://github.com/kubernetes/community/blob/master/contributors/design-proposals/release/versioning.md
+	// for a detailed discussion of this field)
+	//
+	// TODO: This field is still called "gitVersion" for legacy
+	// reasons. For prerelease versions, the build metadata on the
+	// semantic version is a git hash, but the version itself is no
+	// longer the direct output of "git describe", but a slight
+	// translation to be semver compliant.
+
+	// NOTE: The $Format strings are replaced during 'git archive' thanks to the
+	// companion .gitattributes file containing 'export-subst' in this same
+	// directory.  See also https://git-scm.com/docs/gitattributes
+	gitVersion   = "v0.0.0-master+$Format:%h$"
+	gitCommit    = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD)
+	gitTreeState = ""            // state of git tree, either "clean" or "dirty"
+
+	buildDate = "1970-01-01T00:00:00Z" // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ')
+)
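
The comments above describe how these dummy values are replaced at build time. A minimal, hypothetical sketch of the same -ldflags mechanism (the variable and value below are illustrative, not part of this package):

    package main

    import "fmt"

    // version plays the role of base.go's gitVersion: a dummy in-tree value
    // that the build overrides at link time.
    var version = "v0.0.0-dev"

    func main() {
    	fmt.Println(version)
    }

    // Built with
    //   go build -ldflags "-X main.version=v1.18.0" .
    // the binary prints "v1.18.0" instead of the dummy value; the Kubernetes
    // build scripts stamp gitVersion, gitCommit, etc. the same way.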
diff --git a/vendor/k8s.io/component-base/version/def.bzl b/vendor/k8s.io/component-base/version/def.bzl
new file mode 100644
index 00000000..77edcbc8
--- /dev/null
+++ b/vendor/k8s.io/component-base/version/def.bzl
@@ -0,0 +1,39 @@
+# Copyright 2017 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Implements hack/lib/version.sh's kube::version::ldflags() for Bazel.
+def version_x_defs():
+    # This should match the list of packages in kube::version::ldflags
+    stamp_pkgs = [
+        "k8s.io/kubernetes/vendor/k8s.io/component-base/version",
+        "k8s.io/kubernetes/vendor/k8s.io/client-go/pkg/version",
+    ]
+
+    # This should match the list of vars in kube::version::ldflags
+    # It should also match the list of vars set in hack/print-workspace-status.sh.
+    stamp_vars = [
+        "buildDate",
+        "gitCommit",
+        "gitMajor",
+        "gitMinor",
+        "gitTreeState",
+        "gitVersion",
+    ]
+
+    # Generate the cross-product.
+    x_defs = {}
+    for pkg in stamp_pkgs:
+        for var in stamp_vars:
+            x_defs["%s.%s" % (pkg, var)] = "{%s}" % var
+    return x_defs
diff --git a/vendor/k8s.io/component-base/version/version.go b/vendor/k8s.io/component-base/version/version.go
new file mode 100644
index 00000000..d1e76dc0
--- /dev/null
+++ b/vendor/k8s.io/component-base/version/version.go
@@ -0,0 +1,42 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package version
+
+import (
+	"fmt"
+	"runtime"
+
+	apimachineryversion "k8s.io/apimachinery/pkg/version"
+)
+
+// Get returns the overall codebase version. It's for detecting
+// what code a binary was built from.
+func Get() apimachineryversion.Info {
+	// These variables typically come from -ldflags settings and in
+	// their absence fall back to the settings in ./base.go
+	return apimachineryversion.Info{
+		Major:        gitMajor,
+		Minor:        gitMinor,
+		GitVersion:   gitVersion,
+		GitCommit:    gitCommit,
+		GitTreeState: gitTreeState,
+		BuildDate:    buildDate,
+		GoVersion:    runtime.Version(),
+		Compiler:     runtime.Compiler,
+		Platform:     fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH),
+	}
+}
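
A short sketch of consuming Get() (import path as vendored above); without -ldflags overrides it simply reports the fallback values from base.go:

    package main

    import (
    	"fmt"

    	"k8s.io/component-base/version"
    )

    func main() {
    	info := version.Get()
    	// For an unstamped `go build` this prints the dummy base.go values,
    	// e.g. "v0.0.0-master+$Format:%h$ (linux/amd64)".
    	fmt.Printf("%s (%s)\n", info.GitVersion, info.Platform)
    }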
diff --git a/vendor/k8s.io/kube-openapi/pkg/common/common.go b/vendor/k8s.io/kube-openapi/pkg/common/common.go
new file mode 100644
index 00000000..f1c87c30
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/common/common.go
@@ -0,0 +1,192 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+import (
+	"net/http"
+	"strings"
+
+	"github.com/emicklei/go-restful"
+	"github.com/go-openapi/spec"
+)
+
+const (
+	// TODO: Make this configurable.
+	ExtensionPrefix   = "x-kubernetes-"
+	ExtensionV2Schema = ExtensionPrefix + "v2-schema"
+)
+
+// OpenAPIDefinition describes a single type. Normally these definitions are auto-generated using gen-openapi.
+type OpenAPIDefinition struct {
+	Schema       spec.Schema
+	Dependencies []string
+}
+
+type ReferenceCallback func(path string) spec.Ref
+
+// GetOpenAPIDefinitions is a collection of all definitions.
+type GetOpenAPIDefinitions func(ReferenceCallback) map[string]OpenAPIDefinition
+
+// OpenAPIDefinitionGetter gets OpenAPI definitions for a given type. If a type implements this interface,
+// the definition it returns will be used; otherwise the auto-generated definitions will be used. See
+// GetOpenAPITypeFormat for more information about the trade-offs between implementing this interface and
+// relying on GetOpenAPITypeFormat where possible.
+type OpenAPIDefinitionGetter interface {
+	OpenAPIDefinition() *OpenAPIDefinition
+}
+
+type OpenAPIV3DefinitionGetter interface {
+	OpenAPIV3Definition() *OpenAPIDefinition
+}
+
+type PathHandler interface {
+	Handle(path string, handler http.Handler)
+}
+
+// Config is the set of configuration options for OpenAPI spec generation.
+type Config struct {
+	// List of supported protocols such as https, http, etc.
+	ProtocolList []string
+
+	// Info is general information about the API.
+	Info *spec.Info
+
+	// DefaultResponse will be used if an operation does not have any responses listed. It
+	// will show up as ... "responses" : {"default" : $DefaultResponse} in the spec.
+	DefaultResponse *spec.Response
+
+	// ResponseDefinitions will be added to "responses" under the top-level swagger object. This is an object
+	// that holds response definitions that can be used across operations. This property does not define
+	// global responses for all operations. For more info, see:
+	//     https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#fixed-fields
+	ResponseDefinitions map[string]spec.Response
+
+	// CommonResponses will be added as a response to all operation specs. This is a good place to add common
+	// responses such as authorization failed.
+	CommonResponses map[int]spec.Response
+
+	// List of webservice's path prefixes to ignore
+	IgnorePrefixes []string
+
+	// OpenAPIDefinitions should provide definitions for all models used by routes. Failure to provide this map
+	// or any of the models will result in spec generation failure.
+	GetDefinitions GetOpenAPIDefinitions
+
+	// GetOperationIDAndTags returns operation id and tags for a restful route. It is an optional function to customize operation IDs.
+	GetOperationIDAndTags func(r *restful.Route) (string, []string, error)
+
+	// GetDefinitionName returns a friendly name for a definition based on the serving path. The parameter `name` is the full name of the definition.
+	// It is an optional function to customize model names.
+	GetDefinitionName func(name string) (string, spec.Extensions)
+
+	// PostProcessSpec runs after the spec is ready to serve. It allows a final modification to the spec before serving.
+	PostProcessSpec func(*spec.Swagger) (*spec.Swagger, error)
+
+	// SecurityDefinitions is the list of all security definitions for the OpenAPI service. If this is not nil, the user of this config
+	// is responsible for providing DefaultSecurity and (optionally) adding an unauthorized response to CommonResponses.
+	SecurityDefinitions *spec.SecurityDefinitions
+
+	// DefaultSecurity for all operations. This will be passed as spec.SwaggerProps.Security to OpenAPI.
+	// In most cases, this will be a list of acceptable definitions in SecurityDefinitions.
+	DefaultSecurity []map[string][]string
+}
+
+var schemaTypeFormatMap = map[string][]string{
+	"uint":        {"integer", "int32"},
+	"uint8":       {"integer", "byte"},
+	"uint16":      {"integer", "int32"},
+	"uint32":      {"integer", "int64"},
+	"uint64":      {"integer", "int64"},
+	"int":         {"integer", "int32"},
+	"int8":        {"integer", "byte"},
+	"int16":       {"integer", "int32"},
+	"int32":       {"integer", "int32"},
+	"int64":       {"integer", "int64"},
+	"byte":        {"integer", "byte"},
+	"float64":     {"number", "double"},
+	"float32":     {"number", "float"},
+	"bool":        {"boolean", ""},
+	"time.Time":   {"string", "date-time"},
+	"string":      {"string", ""},
+	"integer":     {"integer", ""},
+	"number":      {"number", ""},
+	"boolean":     {"boolean", ""},
+	"[]byte":      {"string", "byte"}, // base64 encoded characters
+	"interface{}": {"object", ""},
+}
+
+// GetOpenAPITypeFormat is a reference for converting Go types (or any custom type) to a simple OpenAPI type/format pair. There are
+// two ways to customize the spec for a type. If you add the type here, it will be converted to a simple type and the type
+// comment (the comment added before the type definition) will be lost. The spec will still have the property
+// comment. The second way is to implement the OpenAPIDefinitionGetter interface. That interface can customize the spec (so
+// the spec does not need to be a simple type/format) or can even return a simple type/format (e.g. IntOrString). For simple
+// type/formats, the benefit of implementing OpenAPIDefinitionGetter is to keep both the type and property documentation.
+// Example:
+// type Sample struct {
+//      ...
+//      // port of the server
+//      port IntOrString
+//      ...
+// }
+// // IntOrString documentation...
+// type IntOrString { ... }
+//
+// Adding IntOrString to this function:
+// "port" : {
+//           format:      "string",
+//           type:        "int-or-string",
+//           Description: "port of the server"
+// }
+//
+// Implement OpenAPIDefinitionGetter for IntOrString:
+//
+// "port" : {
+//           $Ref:    "#/definitions/IntOrString"
+//           Description: "port of the server"
+// }
+// ...
+// definitions:
+// {
+//           "IntOrString": {
+//                     format:      "string",
+//                     type:        "int-or-string",
+//                     Description: "IntOrString documentation..."    // new
+//           }
+// }
+//
+func GetOpenAPITypeFormat(typeName string) (string, string) {
+	mapped, ok := schemaTypeFormatMap[typeName]
+	if !ok {
+		return "", ""
+	}
+	return mapped[0], mapped[1]
+}
+
+func EscapeJsonPointer(p string) string {
+	// Escaping reference name using rfc6901
+	p = strings.Replace(p, "~", "~0", -1)
+	p = strings.Replace(p, "/", "~1", -1)
+	return p
+}
+
+func EmbedOpenAPIDefinitionIntoV2Extension(main OpenAPIDefinition, embedded OpenAPIDefinition) OpenAPIDefinition {
+	if main.Schema.Extensions == nil {
+		main.Schema.Extensions = make(map[string]interface{})
+	}
+	main.Schema.Extensions[ExtensionV2Schema] = embedded.Schema
+	return main
+}
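
A small usage sketch for the two exported helpers above; the expected results follow directly from schemaTypeFormatMap and the RFC 6901 replacements:

    package main

    import (
    	"fmt"

    	"k8s.io/kube-openapi/pkg/common"
    )

    func main() {
    	// Known Go types map to an OpenAPI (type, format) pair.
    	t, f := common.GetOpenAPITypeFormat("int64")
    	fmt.Println(t, f) // integer int64

    	// Unknown names yield two empty strings.
    	t, f = common.GetOpenAPITypeFormat("mypkg.MyType")
    	fmt.Println(t == "" && f == "") // true

    	// RFC 6901 escaping: "~" becomes "~0" and "/" becomes "~1".
    	fmt.Println(common.EscapeJsonPointer("io.k8s.api/core~v1")) // io.k8s.api~1core~0v1
    }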
diff --git a/vendor/k8s.io/kube-openapi/pkg/common/doc.go b/vendor/k8s.io/kube-openapi/pkg/common/doc.go
new file mode 100644
index 00000000..2ba6d247
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/common/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package common holds shared code and types between the OpenAPI code
+// generator and the spec generator.
+package common
diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/errors.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/errors.go
new file mode 100644
index 00000000..b1aa8c00
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/errors.go
@@ -0,0 +1,79 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+	"fmt"
+)
+
+type errors struct {
+	errors []error
+}
+
+func (e *errors) Errors() []error {
+	return e.errors
+}
+
+func (e *errors) AppendErrors(err ...error) {
+	e.errors = append(e.errors, err...)
+}
+
+type ValidationError struct {
+	Path string
+	Err  error
+}
+
+func (e ValidationError) Error() string {
+	return fmt.Sprintf("ValidationError(%s): %v", e.Path, e.Err)
+}
+
+type InvalidTypeError struct {
+	Path     string
+	Expected string
+	Actual   string
+}
+
+func (e InvalidTypeError) Error() string {
+	return fmt.Sprintf("invalid type for %s: got %q, expected %q", e.Path, e.Actual, e.Expected)
+}
+
+type MissingRequiredFieldError struct {
+	Path  string
+	Field string
+}
+
+func (e MissingRequiredFieldError) Error() string {
+	return fmt.Sprintf("missing required field %q in %s", e.Field, e.Path)
+}
+
+type UnknownFieldError struct {
+	Path  string
+	Field string
+}
+
+func (e UnknownFieldError) Error() string {
+	return fmt.Sprintf("unknown field %q in %s", e.Field, e.Path)
+}
+
+type InvalidObjectTypeError struct {
+	Path string
+	Type string
+}
+
+func (e InvalidObjectTypeError) Error() string {
+	return fmt.Sprintf("unknown object type %q in %s", e.Type, e.Path)
+}
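
To make the rendered messages concrete, a short sketch that constructs each error type by hand (the paths and field names are made up):

    package main

    import (
    	"errors"
    	"fmt"

    	"k8s.io/kube-openapi/pkg/util/proto/validation"
    )

    func main() {
    	for _, err := range []error{
    		validation.ValidationError{Path: "Pod.spec", Err: errors.New("boom")},
    		validation.InvalidTypeError{Path: "Pod.spec.replicas", Expected: "integer", Actual: "string"},
    		validation.MissingRequiredFieldError{Path: "Pod.spec.containers[0]", Field: "name"},
    		validation.UnknownFieldError{Path: "Pod.spec", Field: "replicaCount"},
    	} {
    		fmt.Println(err)
    	}
    	// Output:
    	// ValidationError(Pod.spec): boom
    	// invalid type for Pod.spec.replicas: got "string", expected "integer"
    	// missing required field "name" in Pod.spec.containers[0]
    	// unknown field "replicaCount" in Pod.spec
    }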
diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/types.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/types.go
new file mode 100644
index 00000000..6a9f68c0
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/types.go
@@ -0,0 +1,299 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+	"reflect"
+	"sort"
+
+	"k8s.io/kube-openapi/pkg/util/proto"
+)
+
+type validationItem interface {
+	proto.SchemaVisitor
+
+	Errors() []error
+	Path() *proto.Path
+}
+
+type baseItem struct {
+	errors errors
+	path   proto.Path
+}
+
+// Errors returns the list of errors found for this item.
+func (item *baseItem) Errors() []error {
+	return item.errors.Errors()
+}
+
+// AddValidationError wraps the given error into a ValidationError and
+// attaches it to this item.
+func (item *baseItem) AddValidationError(err error) {
+	item.errors.AppendErrors(ValidationError{Path: item.path.String(), Err: err})
+}
+
+// AddError adds a regular (non-validation related) error to the list.
+func (item *baseItem) AddError(err error) {
+	item.errors.AppendErrors(err)
+}
+
+// CopyErrors adds a list of errors to this item. This is useful to copy
+// errors from subitems.
+func (item *baseItem) CopyErrors(errs []error) {
+	item.errors.AppendErrors(errs...)
+}
+
+// Path returns the path of this item, helps print useful errors.
+func (item *baseItem) Path() *proto.Path {
+	return &item.path
+}
+
+// mapItem represents a map entry in the yaml.
+type mapItem struct {
+	baseItem
+
+	Map map[string]interface{}
+}
+
+func (item *mapItem) sortedKeys() []string {
+	sortedKeys := []string{}
+	for key := range item.Map {
+		sortedKeys = append(sortedKeys, key)
+	}
+	sort.Strings(sortedKeys)
+	return sortedKeys
+}
+
+var _ validationItem = &mapItem{}
+
+func (item *mapItem) VisitPrimitive(schema *proto.Primitive) {
+	item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: schema.Type, Actual: "map"})
+}
+
+func (item *mapItem) VisitArray(schema *proto.Array) {
+	item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "array", Actual: "map"})
+}
+
+func (item *mapItem) VisitMap(schema *proto.Map) {
+	for _, key := range item.sortedKeys() {
+		subItem, err := itemFactory(item.Path().FieldPath(key), item.Map[key])
+		if err != nil {
+			item.AddError(err)
+			continue
+		}
+		schema.SubType.Accept(subItem)
+		item.CopyErrors(subItem.Errors())
+	}
+}
+
+func (item *mapItem) VisitKind(schema *proto.Kind) {
+	// Verify each sub-field.
+	for _, key := range item.sortedKeys() {
+		if item.Map[key] == nil {
+			continue
+		}
+		subItem, err := itemFactory(item.Path().FieldPath(key), item.Map[key])
+		if err != nil {
+			item.AddError(err)
+			continue
+		}
+		if _, ok := schema.Fields[key]; !ok {
+			item.AddValidationError(UnknownFieldError{Path: schema.GetPath().String(), Field: key})
+			continue
+		}
+		schema.Fields[key].Accept(subItem)
+		item.CopyErrors(subItem.Errors())
+	}
+
+	// Verify that all required fields are present.
+	for _, required := range schema.RequiredFields {
+		if v, ok := item.Map[required]; !ok || v == nil {
+			item.AddValidationError(MissingRequiredFieldError{Path: schema.GetPath().String(), Field: required})
+		}
+	}
+}
+
+func (item *mapItem) VisitArbitrary(schema *proto.Arbitrary) {
+}
+
+func (item *mapItem) VisitReference(schema proto.Reference) {
+	// passthrough
+	schema.SubSchema().Accept(item)
+}
+
+// arrayItem represents a yaml array.
+type arrayItem struct {
+	baseItem
+
+	Array []interface{}
+}
+
+var _ validationItem = &arrayItem{}
+
+func (item *arrayItem) VisitPrimitive(schema *proto.Primitive) {
+	item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: schema.Type, Actual: "array"})
+}
+
+func (item *arrayItem) VisitArray(schema *proto.Array) {
+	for i, v := range item.Array {
+		path := item.Path().ArrayPath(i)
+		if v == nil {
+			item.AddValidationError(InvalidObjectTypeError{Type: "nil", Path: path.String()})
+			continue
+		}
+		subItem, err := itemFactory(path, v)
+		if err != nil {
+			item.AddError(err)
+			continue
+		}
+		schema.SubType.Accept(subItem)
+		item.CopyErrors(subItem.Errors())
+	}
+}
+
+func (item *arrayItem) VisitMap(schema *proto.Map) {
+	item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "map", Actual: "array"})
+}
+
+func (item *arrayItem) VisitKind(schema *proto.Kind) {
+	item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "map", Actual: "array"})
+}
+
+func (item *arrayItem) VisitArbitrary(schema *proto.Arbitrary) {
+}
+
+func (item *arrayItem) VisitReference(schema proto.Reference) {
+	// passthrough
+	schema.SubSchema().Accept(item)
+}
+
+// primitiveItem represents a yaml value.
+type primitiveItem struct {
+	baseItem
+
+	Value interface{}
+	Kind  string
+}
+
+var _ validationItem = &primitiveItem{}
+
+func (item *primitiveItem) VisitPrimitive(schema *proto.Primitive) {
+	// Some types of primitives can match more than one (a number
+	// can be a string, but not the other way around). Return from
+	// the switch if we have a valid possible type conversion
+	// NOTE(apelisse): This logic is blindly copied from the
+	// existing swagger logic, and I'm not sure I agree with it.
+	switch schema.Type {
+	case proto.Boolean:
+		switch item.Kind {
+		case proto.Boolean:
+			return
+		}
+	case proto.Integer:
+		switch item.Kind {
+		case proto.Integer, proto.Number:
+			return
+		}
+	case proto.Number:
+		switch item.Kind {
+		case proto.Number:
+			return
+		}
+	case proto.String:
+		return
+	}
+	// TODO(wrong): this misses "null"
+
+	item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: schema.Type, Actual: item.Kind})
+}
+
+func (item *primitiveItem) VisitArray(schema *proto.Array) {
+	item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "array", Actual: item.Kind})
+}
+
+func (item *primitiveItem) VisitMap(schema *proto.Map) {
+	item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "map", Actual: item.Kind})
+}
+
+func (item *primitiveItem) VisitKind(schema *proto.Kind) {
+	item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "map", Actual: item.Kind})
+}
+
+func (item *primitiveItem) VisitArbitrary(schema *proto.Arbitrary) {
+}
+
+func (item *primitiveItem) VisitReference(schema proto.Reference) {
+	// passthrough
+	schema.SubSchema().Accept(item)
+}
+
+// itemFactory creates the relevant item type/visitor based on the current yaml type.
+func itemFactory(path proto.Path, v interface{}) (validationItem, error) {
+	// We need to special case for no-type fields in yaml (e.g. empty item in list)
+	if v == nil {
+		return nil, InvalidObjectTypeError{Type: "nil", Path: path.String()}
+	}
+	kind := reflect.TypeOf(v).Kind()
+	switch kind {
+	case reflect.Bool:
+		return &primitiveItem{
+			baseItem: baseItem{path: path},
+			Value:    v,
+			Kind:     proto.Boolean,
+		}, nil
+	case reflect.Int,
+		reflect.Int8,
+		reflect.Int16,
+		reflect.Int32,
+		reflect.Int64,
+		reflect.Uint,
+		reflect.Uint8,
+		reflect.Uint16,
+		reflect.Uint32,
+		reflect.Uint64:
+		return &primitiveItem{
+			baseItem: baseItem{path: path},
+			Value:    v,
+			Kind:     proto.Integer,
+		}, nil
+	case reflect.Float32,
+		reflect.Float64:
+		return &primitiveItem{
+			baseItem: baseItem{path: path},
+			Value:    v,
+			Kind:     proto.Number,
+		}, nil
+	case reflect.String:
+		return &primitiveItem{
+			baseItem: baseItem{path: path},
+			Value:    v,
+			Kind:     proto.String,
+		}, nil
+	case reflect.Array,
+		reflect.Slice:
+		return &arrayItem{
+			baseItem: baseItem{path: path},
+			Array:    v.([]interface{}),
+		}, nil
+	case reflect.Map:
+		return &mapItem{
+			baseItem: baseItem{path: path},
+			Map:      v.(map[string]interface{}),
+		}, nil
+	}
+	return nil, InvalidObjectTypeError{Type: kind.String(), Path: path.String()}
+}
diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/validation.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/validation.go
new file mode 100644
index 00000000..35310f63
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/validation.go
@@ -0,0 +1,30 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+	"k8s.io/kube-openapi/pkg/util/proto"
+)
+
+func ValidateModel(obj interface{}, schema proto.Schema, name string) []error {
+	rootValidation, err := itemFactory(proto.NewPath(name), obj)
+	if err != nil {
+		return []error{err}
+	}
+	schema.Accept(rootValidation)
+	return rootValidation.Errors()
+}
diff --git a/vendor/k8s.io/kubectl/pkg/cmd/util/factory.go b/vendor/k8s.io/kubectl/pkg/cmd/util/factory.go
new file mode 100644
index 00000000..d9df0bf3
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/cmd/util/factory.go
@@ -0,0 +1,66 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/cli-runtime/pkg/genericclioptions"
+	"k8s.io/cli-runtime/pkg/resource"
+	"k8s.io/client-go/dynamic"
+	"k8s.io/client-go/kubernetes"
+	restclient "k8s.io/client-go/rest"
+	"k8s.io/kubectl/pkg/util/openapi"
+	"k8s.io/kubectl/pkg/validation"
+)
+
+// Factory provides abstractions that allow the Kubectl command to be extended across multiple types
+// of resources and different API sets.
+// The rings are here for a reason. In order for composers to be able to provide alternative factory implementations,
+// they need to provide low-level pieces of *certain* functions so that when the factory calls back into itself
+// it uses the custom version of the function. Rather than try to enumerate everything that someone would want to override,
+// we split the factory into rings, where each ring can depend on methods in an earlier ring but cannot depend
+// on peer methods in its own ring.
+// TODO: make the functions interfaces
+// TODO: pass the various interfaces on the factory directly into the command constructors (so the
+// commands are decoupled from the factory).
+type Factory interface {
+	genericclioptions.RESTClientGetter
+
+	// DynamicClient returns a dynamic client ready for use
+	DynamicClient() (dynamic.Interface, error)
+
+	// KubernetesClientSet gives you back an external clientset
+	KubernetesClientSet() (*kubernetes.Clientset, error)
+
+	// Returns a RESTClient for accessing Kubernetes resources or an error.
+	RESTClient() (*restclient.RESTClient, error)
+
+	// NewBuilder returns an object that assists in loading objects from both disk and the server
+	// and which implements the common patterns for CLI interactions with generic resources.
+	NewBuilder() *resource.Builder
+
+	// Returns a RESTClient for working with the specified RESTMapping or an error. This is intended
+	// for working with arbitrary resources and is not guaranteed to point to a Kubernetes APIServer.
+	ClientForMapping(mapping *meta.RESTMapping) (resource.RESTClient, error)
+	// Returns a RESTClient for working with Unstructured objects.
+	UnstructuredClientForMapping(mapping *meta.RESTMapping) (resource.RESTClient, error)
+
+	// Returns a schema that can validate objects stored on disk.
+	Validator(validate bool) (validation.Schema, error)
+	// OpenAPISchema returns the OpenAPI schema definitions as openapi.Resources
+	OpenAPISchema() (openapi.Resources, error)
+}
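
A sketch of what the ring structure buys a composer: embed the Factory and override a single method, inheriting everything else. The wrapper type and its logging are hypothetical; NewFactory comes from factory_client_access.go below, and ConfigFlags is assumed from k8s.io/cli-runtime:

    package main

    import (
    	"fmt"

    	"k8s.io/cli-runtime/pkg/genericclioptions"
    	"k8s.io/client-go/dynamic"
    	cmdutil "k8s.io/kubectl/pkg/cmd/util"
    )

    // loggingFactory overrides only DynamicClient; every other Factory method
    // is promoted from the embedded implementation.
    type loggingFactory struct {
    	cmdutil.Factory
    }

    func (f loggingFactory) DynamicClient() (dynamic.Interface, error) {
    	fmt.Println("building dynamic client")
    	return f.Factory.DynamicClient()
    }

    func main() {
    	base := cmdutil.NewFactory(genericclioptions.NewConfigFlags(true))
    	var f cmdutil.Factory = loggingFactory{Factory: base}
    	_, _ = f.DynamicClient()
    }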
diff --git a/vendor/k8s.io/kubectl/pkg/cmd/util/factory_client_access.go b/vendor/k8s.io/kubectl/pkg/cmd/util/factory_client_access.go
new file mode 100644
index 00000000..e4cf2da0
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/cmd/util/factory_client_access.go
@@ -0,0 +1,177 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// this file contains factories with no other dependencies
+
+package util
+
+import (
+	"sync"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/cli-runtime/pkg/genericclioptions"
+	"k8s.io/cli-runtime/pkg/resource"
+	"k8s.io/client-go/discovery"
+	"k8s.io/client-go/dynamic"
+	"k8s.io/client-go/kubernetes"
+	restclient "k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/clientcmd"
+	"k8s.io/kubectl/pkg/util/openapi"
+	openapivalidation "k8s.io/kubectl/pkg/util/openapi/validation"
+	"k8s.io/kubectl/pkg/validation"
+)
+
+type factoryImpl struct {
+	clientGetter genericclioptions.RESTClientGetter
+
+	// openAPIGetter loads and caches openapi specs
+	openAPIGetter openAPIGetter
+}
+
+type openAPIGetter struct {
+	once   sync.Once
+	getter openapi.Getter
+}
+
+func NewFactory(clientGetter genericclioptions.RESTClientGetter) Factory {
+	if clientGetter == nil {
+		panic("attempt to instantiate client_access_factory with nil clientGetter")
+	}
+
+	f := &factoryImpl{
+		clientGetter: clientGetter,
+	}
+
+	return f
+}
+
+func (f *factoryImpl) ToRESTConfig() (*restclient.Config, error) {
+	return f.clientGetter.ToRESTConfig()
+}
+
+func (f *factoryImpl) ToRESTMapper() (meta.RESTMapper, error) {
+	return f.clientGetter.ToRESTMapper()
+}
+
+func (f *factoryImpl) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) {
+	return f.clientGetter.ToDiscoveryClient()
+}
+
+func (f *factoryImpl) ToRawKubeConfigLoader() clientcmd.ClientConfig {
+	return f.clientGetter.ToRawKubeConfigLoader()
+}
+
+func (f *factoryImpl) KubernetesClientSet() (*kubernetes.Clientset, error) {
+	clientConfig, err := f.ToRESTConfig()
+	if err != nil {
+		return nil, err
+	}
+	return kubernetes.NewForConfig(clientConfig)
+}
+
+func (f *factoryImpl) DynamicClient() (dynamic.Interface, error) {
+	clientConfig, err := f.ToRESTConfig()
+	if err != nil {
+		return nil, err
+	}
+	return dynamic.NewForConfig(clientConfig)
+}
+
+// NewBuilder returns a new resource builder for structured api objects.
+func (f *factoryImpl) NewBuilder() *resource.Builder {
+	return resource.NewBuilder(f.clientGetter)
+}
+
+func (f *factoryImpl) RESTClient() (*restclient.RESTClient, error) {
+	clientConfig, err := f.ToRESTConfig()
+	if err != nil {
+		return nil, err
+	}
+	setKubernetesDefaults(clientConfig)
+	return restclient.RESTClientFor(clientConfig)
+}
+
+func (f *factoryImpl) ClientForMapping(mapping *meta.RESTMapping) (resource.RESTClient, error) {
+	cfg, err := f.clientGetter.ToRESTConfig()
+	if err != nil {
+		return nil, err
+	}
+	if err := setKubernetesDefaults(cfg); err != nil {
+		return nil, err
+	}
+	gvk := mapping.GroupVersionKind
+	switch gvk.Group {
+	case corev1.GroupName:
+		cfg.APIPath = "/api"
+	default:
+		cfg.APIPath = "/apis"
+	}
+	gv := gvk.GroupVersion()
+	cfg.GroupVersion = &gv
+	return restclient.RESTClientFor(cfg)
+}
+
+func (f *factoryImpl) UnstructuredClientForMapping(mapping *meta.RESTMapping) (resource.RESTClient, error) {
+	cfg, err := f.clientGetter.ToRESTConfig()
+	if err != nil {
+		return nil, err
+	}
+	if err := restclient.SetKubernetesDefaults(cfg); err != nil {
+		return nil, err
+	}
+	cfg.APIPath = "/apis"
+	if mapping.GroupVersionKind.Group == corev1.GroupName {
+		cfg.APIPath = "/api"
+	}
+	gv := mapping.GroupVersionKind.GroupVersion()
+	cfg.ContentConfig = resource.UnstructuredPlusDefaultContentConfig()
+	cfg.GroupVersion = &gv
+	return restclient.RESTClientFor(cfg)
+}
+
+func (f *factoryImpl) Validator(validate bool) (validation.Schema, error) {
+	if !validate {
+		return validation.NullSchema{}, nil
+	}
+
+	resources, err := f.OpenAPISchema()
+	if err != nil {
+		return nil, err
+	}
+
+	return validation.ConjunctiveSchema{
+		openapivalidation.NewSchemaValidation(resources),
+		validation.NoDoubleKeySchema{},
+	}, nil
+}
+
+// OpenAPISchema returns metadata and structural information about Kubernetes object definitions.
+func (f *factoryImpl) OpenAPISchema() (openapi.Resources, error) {
+	discovery, err := f.clientGetter.ToDiscoveryClient()
+	if err != nil {
+		return nil, err
+	}
+
+	// Lazily initialize the OpenAPIGetter once
+	f.openAPIGetter.once.Do(func() {
+		// Create the caching OpenAPIGetter
+		f.openAPIGetter.getter = openapi.NewOpenAPIGetter(discovery)
+	})
+
+	// Delegate to the OpenAPIGetter
+	return f.openAPIGetter.getter.Get()
+}
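
A minimal sketch of constructing this factory outside kubectl, assuming the standard cli-runtime ConfigFlags as the RESTClientGetter and a reachable kubeconfig:

    package main

    import (
    	"fmt"

    	"k8s.io/cli-runtime/pkg/genericclioptions"
    	cmdutil "k8s.io/kubectl/pkg/cmd/util"
    )

    func main() {
    	f := cmdutil.NewFactory(genericclioptions.NewConfigFlags(true))

    	// The OpenAPI document is fetched lazily and cached behind sync.Once,
    	// so repeated calls reuse the first result.
    	resources, err := f.OpenAPISchema()
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(resources != nil) // true
    }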
diff --git a/vendor/k8s.io/kubectl/pkg/cmd/util/helpers.go b/vendor/k8s.io/kubectl/pkg/cmd/util/helpers.go
new file mode 100644
index 00000000..323f60c2
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/cmd/util/helpers.go
@@ -0,0 +1,728 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"net/url"
+	"os"
+	"strconv"
+	"strings"
+	"time"
+
+	jsonpatch "github.com/evanphx/json-patch"
+	"github.com/spf13/cobra"
+	"github.com/spf13/pflag"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/apimachinery/pkg/util/yaml"
+	"k8s.io/cli-runtime/pkg/genericclioptions"
+	"k8s.io/cli-runtime/pkg/resource"
+	"k8s.io/client-go/dynamic"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/scale"
+	"k8s.io/client-go/tools/clientcmd"
+	"k8s.io/klog"
+	utilexec "k8s.io/utils/exec"
+)
+
+const (
+	ApplyAnnotationsFlag = "save-config"
+	DefaultErrorExitCode = 1
+)
+
+type debugError interface {
+	DebugError() (msg string, args []interface{})
+}
+
+// AddSourceToErr adds the verb and source string to the error message.
+// verb is a string like "creating" or "deleting".
+// source is the filename or URL of the template file (*.json or *.yaml), or "stdin", used to handle the resource.
+func AddSourceToErr(verb string, source string, err error) error {
+	if source != "" {
+		if statusError, ok := err.(apierrors.APIStatus); ok {
+			status := statusError.Status()
+			status.Message = fmt.Sprintf("error when %s %q: %v", verb, source, status.Message)
+			return &apierrors.StatusError{ErrStatus: status}
+		}
+		return fmt.Errorf("error when %s %q: %v", verb, source, err)
+	}
+	return err
+}
+
+var fatalErrHandler = fatal
+
+// BehaviorOnFatal allows you to override the default behavior when a fatal
+// error occurs, which is to call os.Exit(code). You can pass a panicking function
+// here if you prefer panic() over os.Exit(1).
+func BehaviorOnFatal(f func(string, int)) {
+	fatalErrHandler = f
+}
+
+// DefaultBehaviorOnFatal allows you to undo any previous override.  Useful in
+// tests.
+func DefaultBehaviorOnFatal() {
+	fatalErrHandler = fatal
+}
+
+// fatal prints the message (if provided) and then exits. If V(2) or greater,
+// klog.Fatal is invoked for extended information.
+func fatal(msg string, code int) {
+	if klog.V(2) {
+		klog.FatalDepth(2, msg)
+	}
+	if len(msg) > 0 {
+		// add newline if needed
+		if !strings.HasSuffix(msg, "\n") {
+			msg += "\n"
+		}
+		fmt.Fprint(os.Stderr, msg)
+	}
+	os.Exit(code)
+}
+
+// ErrExit may be passed to CheckErr to instruct it to output nothing but exit with
+// status code 1.
+var ErrExit = fmt.Errorf("exit")
+
+// CheckErr prints a user-friendly error to STDERR and exits with a non-zero
+// exit code. Unrecognized errors will be printed with an "error: " prefix.
+//
+// This method is generic to the command in use and may be used by non-Kubectl
+// commands.
+func CheckErr(err error) {
+	checkErr(err, fatalErrHandler)
+}
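
A short sketch of the override hook in a test-like setting; the handler below records the message instead of exiting the process:

    package main

    import (
    	"errors"
    	"fmt"

    	cmdutil "k8s.io/kubectl/pkg/cmd/util"
    )

    func main() {
    	var captured string
    	cmdutil.BehaviorOnFatal(func(msg string, code int) {
    		captured = fmt.Sprintf("code %d: %s", code, msg)
    	})
    	defer cmdutil.DefaultBehaviorOnFatal()

    	// An unrecognized error gets the "error: " prefix and exit code 1.
    	cmdutil.CheckErr(errors.New("boom"))
    	fmt.Println(captured) // code 1: error: boom
    }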
+
+// CheckDiffErr prints a user-friendly error to STDERR and exits with a
+// non-zero and non-one exit code. Unrecognized errors will be printed
+// with an "error: " prefix.
+//
+// This method is meant specifically for `kubectl diff` and may be used
+// by other commands.
+func CheckDiffErr(err error) {
+	checkErr(err, func(msg string, code int) {
+		fatalErrHandler(msg, code+1)
+	})
+}
+
+// checkErr formats a given error as a string and calls the passed handleErr
+// func with that string and a kubectl exit code.
+func checkErr(err error, handleErr func(string, int)) {
+	// unwrap aggregates of 1
+	if agg, ok := err.(utilerrors.Aggregate); ok && len(agg.Errors()) == 1 {
+		err = agg.Errors()[0]
+	}
+
+	if err == nil {
+		return
+	}
+
+	switch {
+	case err == ErrExit:
+		handleErr("", DefaultErrorExitCode)
+	case apierrors.IsInvalid(err):
+		details := err.(*apierrors.StatusError).Status().Details
+		s := "The request is invalid"
+		if details == nil {
+			handleErr(s, DefaultErrorExitCode)
+			return
+		}
+		if len(details.Kind) != 0 || len(details.Name) != 0 {
+			s = fmt.Sprintf("The %s %q is invalid", details.Kind, details.Name)
+		}
+		if len(details.Causes) > 0 {
+			errs := statusCausesToAggrError(details.Causes)
+			handleErr(MultilineError(s+": ", errs), DefaultErrorExitCode)
+		} else {
+			handleErr(s, DefaultErrorExitCode)
+		}
+	case clientcmd.IsConfigurationInvalid(err):
+		handleErr(MultilineError("Error in configuration: ", err), DefaultErrorExitCode)
+	default:
+		switch err := err.(type) {
+		case *meta.NoResourceMatchError:
+			switch {
+			case len(err.PartialResource.Group) > 0 && len(err.PartialResource.Version) > 0:
+				handleErr(fmt.Sprintf("the server doesn't have a resource type %q in group %q and version %q", err.PartialResource.Resource, err.PartialResource.Group, err.PartialResource.Version), DefaultErrorExitCode)
+			case len(err.PartialResource.Group) > 0:
+				handleErr(fmt.Sprintf("the server doesn't have a resource type %q in group %q", err.PartialResource.Resource, err.PartialResource.Group), DefaultErrorExitCode)
+			case len(err.PartialResource.Version) > 0:
+				handleErr(fmt.Sprintf("the server doesn't have a resource type %q in version %q", err.PartialResource.Resource, err.PartialResource.Version), DefaultErrorExitCode)
+			default:
+				handleErr(fmt.Sprintf("the server doesn't have a resource type %q", err.PartialResource.Resource), DefaultErrorExitCode)
+			}
+		case utilerrors.Aggregate:
+			handleErr(MultipleErrors(``, err.Errors()), DefaultErrorExitCode)
+		case utilexec.ExitError:
+			handleErr(err.Error(), err.ExitStatus())
+		default: // for any other error type
+			msg, ok := StandardErrorMessage(err)
+			if !ok {
+				msg = err.Error()
+				if !strings.HasPrefix(msg, "error: ") {
+					msg = fmt.Sprintf("error: %s", msg)
+				}
+			}
+			handleErr(msg, DefaultErrorExitCode)
+		}
+	}
+}
+
+func statusCausesToAggrError(scs []metav1.StatusCause) utilerrors.Aggregate {
+	errs := make([]error, 0, len(scs))
+	errorMsgs := sets.NewString()
+	for _, sc := range scs {
+		// check for duplicate error messages and skip them
+		msg := fmt.Sprintf("%s: %s", sc.Field, sc.Message)
+		if errorMsgs.Has(msg) {
+			continue
+		}
+		errorMsgs.Insert(msg)
+		errs = append(errs, errors.New(msg))
+	}
+	return utilerrors.NewAggregate(errs)
+}
+
+// StandardErrorMessage translates common errors into a human-readable message, or returns
+// false if the error is not one of the recognized types. It may also log extended
+// information to klog.
+//
+// This method is generic to the command in use and may be used by non-Kubectl
+// commands.
+func StandardErrorMessage(err error) (string, bool) {
+	if debugErr, ok := err.(debugError); ok {
+		klog.V(4).Infof(debugErr.DebugError())
+	}
+	status, isStatus := err.(apierrors.APIStatus)
+	switch {
+	case isStatus:
+		switch s := status.Status(); {
+		case s.Reason == metav1.StatusReasonUnauthorized:
+			return fmt.Sprintf("error: You must be logged in to the server (%s)", s.Message), true
+		case len(s.Reason) > 0:
+			return fmt.Sprintf("Error from server (%s): %s", s.Reason, err.Error()), true
+		default:
+			return fmt.Sprintf("Error from server: %s", err.Error()), true
+		}
+	case apierrors.IsUnexpectedObjectError(err):
+		return fmt.Sprintf("Server returned an unexpected response: %s", err.Error()), true
+	}
+	switch t := err.(type) {
+	case *url.Error:
+		klog.V(4).Infof("Connection error: %s %s: %v", t.Op, t.URL, t.Err)
+		switch {
+		case strings.Contains(t.Err.Error(), "connection refused"):
+			host := t.URL
+			if server, err := url.Parse(t.URL); err == nil {
+				host = server.Host
+			}
+			return fmt.Sprintf("The connection to the server %s was refused - did you specify the right host or port?", host), true
+		}
+		return fmt.Sprintf("Unable to connect to the server: %v", t.Err), true
+	}
+	return "", false
+}
+
+// MultilineError returns a string representing an error that splits sub errors into their own
+// lines. The returned string will end with a newline.
+func MultilineError(prefix string, err error) string {
+	if agg, ok := err.(utilerrors.Aggregate); ok {
+		errs := utilerrors.Flatten(agg).Errors()
+		buf := &bytes.Buffer{}
+		switch len(errs) {
+		case 0:
+			return fmt.Sprintf("%s%v\n", prefix, err)
+		case 1:
+			return fmt.Sprintf("%s%v\n", prefix, messageForError(errs[0]))
+		default:
+			fmt.Fprintln(buf, prefix)
+			for _, err := range errs {
+				fmt.Fprintf(buf, "* %v\n", messageForError(err))
+			}
+			return buf.String()
+		}
+	}
+	return fmt.Sprintf("%s%s\n", prefix, err)
+}
+
+// PrintErrorWithCauses prints an error's kind, name, and each of the error's causes on a new line to errOut.
+// It returns true if a case exists to handle the error type, or false otherwise.
+func PrintErrorWithCauses(err error, errOut io.Writer) bool {
+	switch t := err.(type) {
+	case *apierrors.StatusError:
+		errorDetails := t.Status().Details
+		if errorDetails != nil {
+			fmt.Fprintf(errOut, "error: %s %q is invalid\n\n", errorDetails.Kind, errorDetails.Name)
+			for _, cause := range errorDetails.Causes {
+				fmt.Fprintf(errOut, "* %s: %s\n", cause.Field, cause.Message)
+			}
+			return true
+		}
+	}
+
+	fmt.Fprintf(errOut, "error: %v\n", err)
+	return false
+}
+
+// MultipleErrors returns a newline delimited string containing
+// the prefix and referenced errors in standard form.
+func MultipleErrors(prefix string, errs []error) string {
+	buf := &bytes.Buffer{}
+	for _, err := range errs {
+		fmt.Fprintf(buf, "%s%v\n", prefix, messageForError(err))
+	}
+	return buf.String()
+}
+
+// messageForError returns the string representing the error.
+func messageForError(err error) string {
+	msg, ok := StandardErrorMessage(err)
+	if !ok {
+		msg = err.Error()
+	}
+	return msg
+}
+
+func UsageErrorf(cmd *cobra.Command, format string, args ...interface{}) error {
+	msg := fmt.Sprintf(format, args...)
+	return fmt.Errorf("%s\nSee '%s -h' for help and examples", msg, cmd.CommandPath())
+}
+
+func IsFilenameSliceEmpty(filenames []string, directory string) bool {
+	return len(filenames) == 0 && directory == ""
+}
+
+func GetFlagString(cmd *cobra.Command, flag string) string {
+	s, err := cmd.Flags().GetString(flag)
+	if err != nil {
+		klog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err)
+	}
+	return s
+}
+
+// GetFlagStringSlice can be used to accept multiple arguments with flag repetition (e.g. -f arg1,arg2 -f arg3 ...)
+func GetFlagStringSlice(cmd *cobra.Command, flag string) []string {
+	s, err := cmd.Flags().GetStringSlice(flag)
+	if err != nil {
+		klog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err)
+	}
+	return s
+}
+
+// GetFlagStringArray can be used to accept multiple arguments with flag repetition (e.g. -f arg1 -f arg2 ...)
+func GetFlagStringArray(cmd *cobra.Command, flag string) []string {
+	s, err := cmd.Flags().GetStringArray(flag)
+	if err != nil {
+		klog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err)
+	}
+	return s
+}
+
+func GetFlagBool(cmd *cobra.Command, flag string) bool {
+	b, err := cmd.Flags().GetBool(flag)
+	if err != nil {
+		klog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err)
+	}
+	return b
+}
+
+// Assumes the flag has a default value.
+func GetFlagInt(cmd *cobra.Command, flag string) int {
+	i, err := cmd.Flags().GetInt(flag)
+	if err != nil {
+		klog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err)
+	}
+	return i
+}
+
+// Assumes the flag has a default value.
+func GetFlagInt32(cmd *cobra.Command, flag string) int32 {
+	i, err := cmd.Flags().GetInt32(flag)
+	if err != nil {
+		klog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err)
+	}
+	return i
+}
+
+// Assumes the flag has a default value.
+func GetFlagInt64(cmd *cobra.Command, flag string) int64 {
+	i, err := cmd.Flags().GetInt64(flag)
+	if err != nil {
+		klog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err)
+	}
+	return i
+}
+
+func GetFlagDuration(cmd *cobra.Command, flag string) time.Duration {
+	d, err := cmd.Flags().GetDuration(flag)
+	if err != nil {
+		klog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err)
+	}
+	return d
+}
+
+func GetPodRunningTimeoutFlag(cmd *cobra.Command) (time.Duration, error) {
+	timeout := GetFlagDuration(cmd, "pod-running-timeout")
+	if timeout <= 0 {
+		return timeout, fmt.Errorf("--pod-running-timeout must be higher than zero")
+	}
+	return timeout, nil
+}
+
+func AddValidateFlags(cmd *cobra.Command) {
+	cmd.Flags().Bool("validate", true, "If true, use a schema to validate the input before sending it")
+}
+
+func AddValidateOptionFlags(cmd *cobra.Command, options *ValidateOptions) {
+	cmd.Flags().BoolVar(&options.EnableValidation, "validate", options.EnableValidation, "If true, use a schema to validate the input before sending it")
+}
+
+func AddFilenameOptionFlags(cmd *cobra.Command, options *resource.FilenameOptions, usage string) {
+	AddJsonFilenameFlag(cmd.Flags(), &options.Filenames, "Filename, directory, or URL to files "+usage)
+	AddKustomizeFlag(cmd.Flags(), &options.Kustomize)
+	cmd.Flags().BoolVarP(&options.Recursive, "recursive", "R", options.Recursive, "Process the directory used in -f, --filename recursively. Useful when you want to manage related manifests organized within the same directory.")
+}
+
+func AddJsonFilenameFlag(flags *pflag.FlagSet, value *[]string, usage string) {
+	flags.StringSliceVarP(value, "filename", "f", *value, usage)
+	annotations := make([]string, 0, len(resource.FileExtensions))
+	for _, ext := range resource.FileExtensions {
+		annotations = append(annotations, strings.TrimLeft(ext, "."))
+	}
+	flags.SetAnnotation("filename", cobra.BashCompFilenameExt, annotations)
+}
+
+// AddKustomizeFlag adds kustomize flag to a command
+func AddKustomizeFlag(flags *pflag.FlagSet, value *string) {
+	flags.StringVarP(value, "kustomize", "k", *value, "Process the kustomization directory. This flag can't be used together with -f or -R.")
+}
+
+// AddDryRunFlag adds dry-run flag to a command. Usually used by mutations.
+func AddDryRunFlag(cmd *cobra.Command) {
+	cmd.Flags().String(
+		"dry-run",
+		"none",
+		`Must be "none", "server", or "client". If client strategy, only print the object that would be sent, without sending it. If server strategy, submit server-side request without persisting the resource.`,
+	)
+	cmd.Flags().Lookup("dry-run").NoOptDefVal = "unchanged"
+}
+
+func AddServerSideApplyFlags(cmd *cobra.Command) {
+	cmd.Flags().Bool("server-side", false, "If true, apply runs in the server instead of the client.")
+	cmd.Flags().Bool("force-conflicts", false, "If true, server-side apply will force the changes against conflicts.")
+	cmd.Flags().String("field-manager", "kubectl", "Name of the manager used to track field ownership.")
+}
+
+func AddPodRunningTimeoutFlag(cmd *cobra.Command, defaultTimeout time.Duration) {
+	cmd.Flags().Duration("pod-running-timeout", defaultTimeout, "The length of time (like 5s, 2m, or 3h, higher than zero) to wait until at least one pod is running")
+}
+
+func AddApplyAnnotationFlags(cmd *cobra.Command) {
+	cmd.Flags().Bool(ApplyAnnotationsFlag, false, "If true, the configuration of current object will be saved in its annotation. Otherwise, the annotation will be unchanged. This flag is useful when you want to perform kubectl apply on this object in the future.")
+}
+
+func AddApplyAnnotationVarFlags(cmd *cobra.Command, applyAnnotation *bool) {
+	cmd.Flags().BoolVar(applyAnnotation, ApplyAnnotationsFlag, *applyAnnotation, "If true, the configuration of current object will be saved in its annotation. Otherwise, the annotation will be unchanged. This flag is useful when you want to perform kubectl apply on this object in the future.")
+}
+
+// AddGeneratorFlags adds flags common to resource generation commands
+// TODO: need to take a pass at other generator commands to use this set of flags
+func AddGeneratorFlags(cmd *cobra.Command, defaultGenerator string) {
+	cmd.Flags().String("generator", defaultGenerator, "The name of the API generator to use.")
+	cmd.Flags().MarkDeprecated("generator", "has no effect and will be removed in the future.")
+	AddDryRunFlag(cmd)
+}
+
+type ValidateOptions struct {
+	EnableValidation bool
+}
+
+// Merge requires JSON serialization
+// TODO: merge assumes JSON serialization, and does not properly abstract API retrieval
+func Merge(codec runtime.Codec, dst runtime.Object, fragment string) (runtime.Object, error) {
+	// encode dst into versioned JSON and apply the fragment directly to it
+	target, err := runtime.Encode(codec, dst)
+	if err != nil {
+		return nil, err
+	}
+	patched, err := jsonpatch.MergePatch(target, []byte(fragment))
+	if err != nil {
+		return nil, err
+	}
+	out, err := runtime.Decode(codec, patched)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
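
A hedged sketch of Merge in action; the codec and Pod type are assumptions pulled from client-go and k8s.io/api rather than anything this file prescribes:

    package main

    import (
    	"fmt"

    	corev1 "k8s.io/api/core/v1"
    	"k8s.io/client-go/kubernetes/scheme"
    	cmdutil "k8s.io/kubectl/pkg/cmd/util"
    )

    func main() {
    	pod := &corev1.Pod{}
    	pod.Name = "web"

    	// Encode the Pod to versioned JSON, merge-patch the fragment onto it,
    	// and decode the result back into a typed object.
    	codec := scheme.Codecs.LegacyCodec(corev1.SchemeGroupVersion)
    	obj, err := cmdutil.Merge(codec, pod, `{"metadata":{"labels":{"app":"web"}}}`)
    	if err != nil {
    		panic(err)
    	}
    	merged := obj.(*corev1.Pod)
    	fmt.Println(merged.Name, merged.Labels["app"]) // web web
    }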
+
+// DumpReaderToFile writes all data from the given io.Reader to the specified file
+// (usually for temporary use).
+func DumpReaderToFile(reader io.Reader, filename string) error {
+	f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	buffer := make([]byte, 1024)
+	for {
+		count, err := reader.Read(buffer)
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return err
+		}
+		_, err = f.Write(buffer[:count])
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func GetServerSideApplyFlag(cmd *cobra.Command) bool {
+	return GetFlagBool(cmd, "server-side")
+}
+
+func GetForceConflictsFlag(cmd *cobra.Command) bool {
+	return GetFlagBool(cmd, "force-conflicts")
+}
+
+func GetFieldManagerFlag(cmd *cobra.Command) string {
+	return GetFlagString(cmd, "field-manager")
+}
+
+type DryRunStrategy int
+
+const (
+	DryRunNone DryRunStrategy = iota
+	DryRunClient
+	DryRunServer
+)
+
+func GetDryRunStrategy(cmd *cobra.Command) (DryRunStrategy, error) {
+	var dryRunFlag = GetFlagString(cmd, "dry-run")
+	b, err := strconv.ParseBool(dryRunFlag)
+	// The flag is not a boolean
+	if err != nil {
+		switch dryRunFlag {
+		case cmd.Flag("dry-run").NoOptDefVal:
+			klog.Warning(`--dry-run is deprecated and can be replaced with --dry-run=client.`)
+			return DryRunClient, nil
+		case "client":
+			return DryRunClient, nil
+		case "server":
+			return DryRunServer, nil
+		case "none":
+			return DryRunNone, nil
+		default:
+			return DryRunNone, fmt.Errorf(`Invalid dry-run value (%v). Must be "none", "server", or "client".`, dryRunFlag)
+		}
+	}
+	// The flag was a boolean
+	if b {
+		klog.Warningf(`--dry-run=%v is deprecated (boolean value) and can be replaced with --dry-run=%s.`, dryRunFlag, "client")
+		return DryRunClient, nil
+	}
+	klog.Warningf(`--dry-run=%v is deprecated (boolean value) and can be replaced with --dry-run=%s.`, dryRunFlag, "none")
+	return DryRunNone, nil
+}
+
+// PrintFlagsWithDryRunStrategy sets a success message at print time for the dry run strategy
+//
+// TODO(juanvallejo): This can be cleaned up even further by creating
+// a PrintFlags struct that binds the --dry-run flag, and whose
+// ToPrinter method returns a printer that understands how to print
+// this success message.
+func PrintFlagsWithDryRunStrategy(printFlags *genericclioptions.PrintFlags, dryRunStrategy DryRunStrategy) *genericclioptions.PrintFlags {
+	switch dryRunStrategy {
+	case DryRunClient:
+		printFlags.Complete("%s (dry run)")
+	case DryRunServer:
+		printFlags.Complete("%s (server dry run)")
+	}
+	return printFlags
+}
+
+// GetResourcesAndPairs retrieves resources and "KEY=VALUE or KEY-" pair args from the given args
+func GetResourcesAndPairs(args []string, pairType string) (resources []string, pairArgs []string, err error) {
+	foundPair := false
+	for _, s := range args {
+		nonResource := (strings.Contains(s, "=") && s[0] != '=') || (strings.HasSuffix(s, "-") && s != "-")
+		switch {
+		case !foundPair && nonResource:
+			foundPair = true
+			fallthrough
+		case foundPair && nonResource:
+			pairArgs = append(pairArgs, s)
+		case !foundPair && !nonResource:
+			resources = append(resources, s)
+		case foundPair && !nonResource:
+			err = fmt.Errorf("all resources must be specified before %s changes: %s", pairType, s)
+			return
+		}
+	}
+	return
+}
+
+// ParsePairs retrieves new and remove pairs (if supportRemove is true) from "KEY=VALUE or KEY-" pair args
+func ParsePairs(pairArgs []string, pairType string, supportRemove bool) (newPairs map[string]string, removePairs []string, err error) {
+	newPairs = map[string]string{}
+	if supportRemove {
+		removePairs = []string{}
+	}
+	var invalidBuf bytes.Buffer
+	var invalidBufNonEmpty bool
+	for _, pairArg := range pairArgs {
+		if strings.Contains(pairArg, "=") && pairArg[0] != '=' {
+			parts := strings.SplitN(pairArg, "=", 2)
+			if len(parts) != 2 {
+				if invalidBufNonEmpty {
+					invalidBuf.WriteString(", ")
+				}
+				invalidBuf.WriteString(pairArg)
+				invalidBufNonEmpty = true
+			} else {
+				newPairs[parts[0]] = parts[1]
+			}
+		} else if supportRemove && strings.HasSuffix(pairArg, "-") && pairArg != "-" {
+			removePairs = append(removePairs, pairArg[:len(pairArg)-1])
+		} else {
+			if invalidBufNonEmpty {
+				invalidBuf.WriteString(", ")
+			}
+			invalidBuf.WriteString(pairArg)
+			invalidBufNonEmpty = true
+		}
+	}
+	if invalidBufNonEmpty {
+		err = fmt.Errorf("invalid %s format: %s", pairType, invalidBuf.String())
+		return
+	}
+
+	return
+}
+
+// IsSiblingCommandExists receives a pointer to a cobra command and a target string.
+// Returns true if the target string is found in the list of sibling commands.
+func IsSiblingCommandExists(cmd *cobra.Command, targetCmdName string) bool {
+	for _, c := range cmd.Parent().Commands() {
+		if c.Name() == targetCmdName {
+			return true
+		}
+	}
+
+	return false
+}
+
+// DefaultSubCommandRun prints a command's help string to the specified output if no
+// arguments (sub-commands) are provided, or a usage error otherwise.
+func DefaultSubCommandRun(out io.Writer) func(c *cobra.Command, args []string) {
+	return func(c *cobra.Command, args []string) {
+		c.SetOutput(out)
+		RequireNoArguments(c, args)
+		c.Help()
+		CheckErr(ErrExit)
+	}
+}
+
+// RequireNoArguments exits with a usage error if extra arguments are provided.
+func RequireNoArguments(c *cobra.Command, args []string) {
+	if len(args) > 0 {
+		CheckErr(UsageErrorf(c, "unknown command %q", strings.Join(args, " ")))
+	}
+}
+
+// StripComments will transform a YAML file into JSON, thus dropping any comments
+// in it. Note that if the given file has a syntax error, the transformation will
+// fail and we will manually drop all comments from the file.
+func StripComments(file []byte) []byte {
+	stripped := file
+	stripped, err := yaml.ToJSON(stripped)
+	if err != nil {
+		stripped = ManualStrip(file)
+	}
+	return stripped
+}
+
+// ManualStrip is used for dropping comments from a YAML file
+func ManualStrip(file []byte) []byte {
+	stripped := []byte{}
+	lines := bytes.Split(file, []byte("\n"))
+	for i, line := range lines {
+		if bytes.HasPrefix(bytes.TrimSpace(line), []byte("#")) {
+			continue
+		}
+		stripped = append(stripped, line...)
+		if i < len(lines)-1 {
+			stripped = append(stripped, '\n')
+		}
+	}
+	return stripped
+}
+
+// ScaleClientFunc provides a ScalesGetter
+type ScaleClientFunc func(genericclioptions.RESTClientGetter) (scale.ScalesGetter, error)
+
+// ScaleClientFn gives a way to easily override the function for unit testing if needed.
+var ScaleClientFn ScaleClientFunc = scaleClient
+
+// scaleClient returns a ScalesGetter for the given RESTClientGetter
+func scaleClient(restClientGetter genericclioptions.RESTClientGetter) (scale.ScalesGetter, error) {
+	discoveryClient, err := restClientGetter.ToDiscoveryClient()
+	if err != nil {
+		return nil, err
+	}
+
+	clientConfig, err := restClientGetter.ToRESTConfig()
+	if err != nil {
+		return nil, err
+	}
+
+	setKubernetesDefaults(clientConfig)
+	restClient, err := rest.RESTClientFor(clientConfig)
+	if err != nil {
+		return nil, err
+	}
+	resolver := scale.NewDiscoveryScaleKindResolver(discoveryClient)
+	mapper, err := restClientGetter.ToRESTMapper()
+	if err != nil {
+		return nil, err
+	}
+
+	return scale.New(restClient, mapper, dynamic.LegacyAPIPathResolverFunc, resolver), nil
+}
+
+func Warning(cmdErr io.Writer, newGeneratorName, oldGeneratorName string) {
+	fmt.Fprintf(cmdErr, "WARNING: New generator %q specified, "+
+		"but it isn't available. "+
+		"Falling back to %q.\n",
+		newGeneratorName,
+		oldGeneratorName,
+	)
+}
diff --git a/vendor/k8s.io/kubectl/pkg/cmd/util/kubectl_match_version.go b/vendor/k8s.io/kubectl/pkg/cmd/util/kubectl_match_version.go
new file mode 100644
index 00000000..74308bc5
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/cmd/util/kubectl_match_version.go
@@ -0,0 +1,129 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+	"sync"
+
+	"github.com/spf13/pflag"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/discovery"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/clientcmd"
+	"k8s.io/kubectl/pkg/scheme"
+
+	"k8s.io/cli-runtime/pkg/genericclioptions"
+	"k8s.io/component-base/version"
+)
+
+const (
+	flagMatchBinaryVersion = "match-server-version"
+)
+
+// MatchVersionFlags is for setting the "match server version" function.
+type MatchVersionFlags struct {
+	Delegate genericclioptions.RESTClientGetter
+
+	RequireMatchedServerVersion bool
+	checkServerVersion          sync.Once
+	matchesServerVersionErr     error
+}
+
+var _ genericclioptions.RESTClientGetter = &MatchVersionFlags{}
+
+func (f *MatchVersionFlags) checkMatchingServerVersion() error {
+	f.checkServerVersion.Do(func() {
+		if !f.RequireMatchedServerVersion {
+			return
+		}
+		discoveryClient, err := f.Delegate.ToDiscoveryClient()
+		if err != nil {
+			f.matchesServerVersionErr = err
+			return
+		}
+		f.matchesServerVersionErr = discovery.MatchesServerVersion(version.Get(), discoveryClient)
+	})
+
+	return f.matchesServerVersionErr
+}
+
+// ToRESTConfig implements RESTClientGetter.
+// Returns a REST client configuration based on a provided path
+// to a .kubeconfig file, loading rules, and config flag overrides.
+// Expects the AddFlags method to have been called.
+func (f *MatchVersionFlags) ToRESTConfig() (*rest.Config, error) {
+	if err := f.checkMatchingServerVersion(); err != nil {
+		return nil, err
+	}
+	clientConfig, err := f.Delegate.ToRESTConfig()
+	if err != nil {
+		return nil, err
+	}
+	// TODO we should not have to do this.  It smacks of something going wrong.
+	setKubernetesDefaults(clientConfig)
+	return clientConfig, nil
+}
+
+func (f *MatchVersionFlags) ToRawKubeConfigLoader() clientcmd.ClientConfig {
+	return f.Delegate.ToRawKubeConfigLoader()
+}
+
+func (f *MatchVersionFlags) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) {
+	if err := f.checkMatchingServerVersion(); err != nil {
+		return nil, err
+	}
+	return f.Delegate.ToDiscoveryClient()
+}
+
+// ToRESTMapper returns a mapper.
+func (f *MatchVersionFlags) ToRESTMapper() (meta.RESTMapper, error) {
+	if err := f.checkMatchingServerVersion(); err != nil {
+		return nil, err
+	}
+	return f.Delegate.ToRESTMapper()
+}
+
+func (f *MatchVersionFlags) AddFlags(flags *pflag.FlagSet) {
+	flags.BoolVar(&f.RequireMatchedServerVersion, flagMatchBinaryVersion, f.RequireMatchedServerVersion, "Require server version to match client version")
+}
+
+func NewMatchVersionFlags(delegate genericclioptions.RESTClientGetter) *MatchVersionFlags {
+	return &MatchVersionFlags{
+		Delegate: delegate,
+	}
+}
+
+// setKubernetesDefaults sets default values on the provided client config for accessing the
+// Kubernetes API or returns an error if any of the defaults are impossible or invalid.
+// TODO this isn't what we want.  Each clientset should be setting defaults as it sees fit.
+func setKubernetesDefaults(config *rest.Config) error {
+	// TODO remove this hack.  This is allowing the GetOptions to be serialized.
+	config.GroupVersion = &schema.GroupVersion{Group: "", Version: "v1"}
+
+	if config.APIPath == "" {
+		config.APIPath = "/api"
+	}
+	if config.NegotiatedSerializer == nil {
+		// This codec factory ensures the resources are not converted. Therefore, resources
+		// will not be round-tripped through internal versions. Defaulting does not happen
+		// on the client.
+		config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+	}
+	return rest.SetKubernetesDefaults(config)
+}
diff --git a/vendor/k8s.io/kubectl/pkg/cmd/util/printing.go b/vendor/k8s.io/kubectl/pkg/cmd/util/printing.go
new file mode 100644
index 00000000..ebd22882
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/cmd/util/printing.go
@@ -0,0 +1,29 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+	"fmt"
+
+	"k8s.io/kubectl/pkg/util/templates"
+)
+
+// SuggestAPIResources returns a suggestion to use the "api-resources" command
+// to retrieve a list of supported resources
+func SuggestAPIResources(parent string) string {
+	return templates.LongDesc(fmt.Sprintf("Use \"%s api-resources\" for a complete list of supported resources.", parent))
+}
diff --git a/vendor/k8s.io/kubectl/pkg/drain/cordon.go b/vendor/k8s.io/kubectl/pkg/drain/cordon.go
index 8f0f56d2..cfe2b8b9 100644
--- a/vendor/k8s.io/kubectl/pkg/drain/cordon.go
+++ b/vendor/k8s.io/kubectl/pkg/drain/cordon.go
@@ -17,9 +17,11 @@ limitations under the License.
 package drain
 
 import (
+	"context"
 	"fmt"
 
 	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 
@@ -70,7 +72,7 @@ func (c *CordonHelper) UpdateIfRequired(desired bool) bool {
 // updating the given node object; it may return error if the object cannot be encoded as
 // JSON, or if either patch or update calls fail; it will also return a second error
 // whenever creating a patch has failed
-func (c *CordonHelper) PatchOrReplace(clientset kubernetes.Interface) (error, error) {
+func (c *CordonHelper) PatchOrReplace(clientset kubernetes.Interface, serverDryRun bool) (error, error) {
 	client := clientset.CoreV1().Nodes()
 
 	oldData, err := json.Marshal(c.node)
@@ -87,9 +89,17 @@ func (c *CordonHelper) PatchOrReplace(clientset kubernetes.Interface) (error, er
 
 	patchBytes, patchErr := strategicpatch.CreateTwoWayMergePatch(oldData, newData, c.node)
 	if patchErr == nil {
-		_, err = client.Patch(c.node.Name, types.StrategicMergePatchType, patchBytes)
+		patchOptions := metav1.PatchOptions{}
+		if serverDryRun {
+			patchOptions.DryRun = []string{metav1.DryRunAll}
+		}
+		_, err = client.Patch(context.TODO(), c.node.Name, types.StrategicMergePatchType, patchBytes, patchOptions)
 	} else {
-		_, err = client.Update(c.node)
+		updateOptions := metav1.UpdateOptions{}
+		if serverDryRun {
+			updateOptions.DryRun = []string{metav1.DryRunAll}
+		}
+		_, err = client.Update(context.TODO(), c.node, updateOptions)
 	}
 	return err, patchErr
 }
diff --git a/vendor/k8s.io/kubectl/pkg/drain/default.go b/vendor/k8s.io/kubectl/pkg/drain/default.go
index ec0351b0..3df949f0 100644
--- a/vendor/k8s.io/kubectl/pkg/drain/default.go
+++ b/vendor/k8s.io/kubectl/pkg/drain/default.go
@@ -57,7 +57,7 @@ func RunCordonOrUncordon(drainer *Helper, node *corev1.Node, desired bool) error
 		return nil
 	}
 
-	err, patchErr := c.PatchOrReplace(drainer.Client)
+	err, patchErr := c.PatchOrReplace(drainer.Client, false)
 	if patchErr != nil {
 		return patchErr
 	}
diff --git a/vendor/k8s.io/kubectl/pkg/drain/drain.go b/vendor/k8s.io/kubectl/pkg/drain/drain.go
index d123dd33..9c203668 100644
--- a/vendor/k8s.io/kubectl/pkg/drain/drain.go
+++ b/vendor/k8s.io/kubectl/pkg/drain/drain.go
@@ -31,7 +31,9 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/cli-runtime/pkg/resource"
 	"k8s.io/client-go/kubernetes"
+	cmdutil "k8s.io/kubectl/pkg/cmd/util"
 )
 
 const (
@@ -39,10 +41,12 @@ const (
 	EvictionKind = "Eviction"
 	// EvictionSubresource represents the kind of evictions object as pod's subresource
 	EvictionSubresource = "pods/eviction"
+	podSkipMsgTemplate  = "pod %q has DeletionTimestamp older than %v seconds, skipping\n"
 )
 
 // Helper contains the parameters to control the behaviour of drainer
 type Helper struct {
+	Ctx                 context.Context
 	Client              kubernetes.Interface
 	Force               bool
 	GracePeriodSeconds  int
@@ -51,16 +55,39 @@ type Helper struct {
 	DeleteLocalData     bool
 	Selector            string
 	PodSelector         string
-	Out                 io.Writer
-	ErrOut              io.Writer
 
-	// TODO(justinsb): unnecessary?
-	DryRun bool
+	// DisableEviction forces drain to use delete rather than evict
+	DisableEviction bool
+
+	// SkipWaitForDeleteTimeoutSeconds ignores pods whose DeletionTimestamp is
+	// older than N seconds. It's up to the user to decide when this option is
+	// appropriate; one example is when the Node is unready and the pods won't
+	// drain otherwise.
+	SkipWaitForDeleteTimeoutSeconds int
+
+	Out    io.Writer
+	ErrOut io.Writer
+
+	DryRunStrategy cmdutil.DryRunStrategy
+	DryRunVerifier *resource.DryRunVerifier
 
 	// OnPodDeletedOrEvicted is called when a pod is evicted/deleted; for printing progress output
 	OnPodDeletedOrEvicted func(pod *corev1.Pod, usingEviction bool)
 }
 
+type waitForDeleteParams struct {
+	ctx                             context.Context
+	pods                            []corev1.Pod
+	interval                        time.Duration
+	timeout                         time.Duration
+	usingEviction                   bool
+	getPodFn                        func(string, string) (*corev1.Pod, error)
+	onDoneFn                        func(pod *corev1.Pod, usingEviction bool)
+	globalTimeout                   time.Duration
+	skipWaitForDeleteTimeoutSeconds int
+	out                             io.Writer
+}
+
 // CheckEvictionSupport uses Discovery API to find out if the server support
 // eviction subresource If support, it will return its groupVersion; Otherwise,
 // it will return an empty string
@@ -94,22 +121,37 @@ func CheckEvictionSupport(clientset kubernetes.Interface) (string, error) {
 	return "", nil
 }
 
-func (d *Helper) makeDeleteOptions() *metav1.DeleteOptions {
-	deleteOptions := &metav1.DeleteOptions{}
+func (d *Helper) makeDeleteOptions() metav1.DeleteOptions {
+	deleteOptions := metav1.DeleteOptions{}
 	if d.GracePeriodSeconds >= 0 {
 		gracePeriodSeconds := int64(d.GracePeriodSeconds)
 		deleteOptions.GracePeriodSeconds = &gracePeriodSeconds
 	}
+	if d.DryRunStrategy == cmdutil.DryRunServer {
+		deleteOptions.DryRun = []string{metav1.DryRunAll}
+	}
 	return deleteOptions
 }
 
 // DeletePod will delete the given pod, or return an error if it couldn't
 func (d *Helper) DeletePod(pod corev1.Pod) error {
-	return d.Client.CoreV1().Pods(pod.Namespace).Delete(pod.Name, d.makeDeleteOptions())
+	if d.DryRunStrategy == cmdutil.DryRunServer {
+		if err := d.DryRunVerifier.HasSupport(pod.GroupVersionKind()); err != nil {
+			return err
+		}
+	}
+	return d.Client.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, d.makeDeleteOptions())
 }
 
 // EvictPod will evict the give pod, or return an error if it couldn't
 func (d *Helper) EvictPod(pod corev1.Pod, policyGroupVersion string) error {
+	if d.DryRunStrategy == cmdutil.DryRunServer {
+		if err := d.DryRunVerifier.HasSupport(pod.GroupVersionKind()); err != nil {
+			return err
+		}
+	}
+
+	delOpts := d.makeDeleteOptions()
 	eviction := &policyv1beta1.Eviction{
 		TypeMeta: metav1.TypeMeta{
 			APIVersion: policyGroupVersion,
@@ -119,10 +161,11 @@ func (d *Helper) EvictPod(pod corev1.Pod, policyGroupVersion string) error {
 			Name:      pod.Name,
 			Namespace: pod.Namespace,
 		},
-		DeleteOptions: d.makeDeleteOptions(),
+		DeleteOptions: &delOpts,
 	}
+
 	// Remember to change change the URL manipulation func when Eviction's version change
-	return d.Client.PolicyV1beta1().Evictions(eviction.Namespace).Evict(eviction)
+	return d.Client.PolicyV1beta1().Evictions(eviction.Namespace).Evict(context.TODO(), eviction)
 }
 
 // GetPodsForDeletion receives resource info for a node, and returns those pods as PodDeleteList,
@@ -135,7 +178,7 @@ func (d *Helper) GetPodsForDeletion(nodeName string) (*podDeleteList, []error) {
 		return nil, []error{err}
 	}
 
-	podList, err := d.Client.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{
+	podList, err := d.Client.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{
 		LabelSelector: labelSelector.String(),
 		FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": nodeName}).String()})
 	if err != nil {
@@ -155,12 +198,13 @@ func (d *Helper) GetPodsForDeletion(nodeName string) (*podDeleteList, []error) {
 				break
 			}
 		}
-		if status.delete {
-			pods = append(pods, podDelete{
-				pod:    pod,
-				status: status,
-			})
-		}
+		// Add the pod to podDeleteList regardless of its podDeleteStatus;
+		// pods whose podDeleteStatus is false (e.g. DaemonSet pods) will
+		// be caught by list.errors()
+		pods = append(pods, podDelete{
+			pod:    pod,
+			status: status,
+		})
 	}
 
 	list := &podDeleteList{items: pods}
@@ -178,17 +222,20 @@ func (d *Helper) DeleteOrEvictPods(pods []corev1.Pod) error {
 		return nil
 	}
 
-	policyGroupVersion, err := CheckEvictionSupport(d.Client)
-	if err != nil {
-		return err
-	}
-
 	// TODO(justinsb): unnecessary?
 	getPodFn := func(namespace, name string) (*corev1.Pod, error) {
-		return d.Client.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{})
+		return d.Client.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{})
 	}
-	if len(policyGroupVersion) > 0 {
-		return d.evictPods(pods, policyGroupVersion, getPodFn)
+
+	if !d.DisableEviction {
+		policyGroupVersion, err := CheckEvictionSupport(d.Client)
+		if err != nil {
+			return err
+		}
+
+		if len(policyGroupVersion) > 0 {
+			return d.evictPods(pods, policyGroupVersion, getPodFn)
+		}
 	}
 
 	return d.deletePods(pods, getPodFn)
@@ -203,12 +250,17 @@ func (d *Helper) evictPods(pods []corev1.Pod, policyGroupVersion string, getPodF
 	} else {
 		globalTimeout = d.Timeout
 	}
-	ctx, cancel := context.WithTimeout(context.TODO(), globalTimeout)
+	ctx, cancel := context.WithTimeout(d.getContext(), globalTimeout)
 	defer cancel()
 	for _, pod := range pods {
 		go func(pod corev1.Pod, returnCh chan error) {
 			for {
-				fmt.Fprintf(d.Out, "evicting pod %q\n", pod.Name)
+				switch d.DryRunStrategy {
+				case cmdutil.DryRunServer:
+					fmt.Fprintf(d.Out, "evicting pod %s/%s (server dry run)\n", pod.Namespace, pod.Name)
+				default:
+					fmt.Fprintf(d.Out, "evicting pod %s/%s\n", pod.Namespace, pod.Name)
+				}
 				select {
 				case <-ctx.Done():
 					// return here or we'll leak a goroutine.
@@ -230,7 +282,23 @@ func (d *Helper) evictPods(pods []corev1.Pod, policyGroupVersion string, getPodF
 					return
 				}
 			}
-			_, err := waitForDelete(ctx, []corev1.Pod{pod}, 1*time.Second, time.Duration(math.MaxInt64), true, getPodFn, d.OnPodDeletedOrEvicted, globalTimeout)
+			if d.DryRunStrategy == cmdutil.DryRunServer {
+				returnCh <- nil
+				return
+			}
+			params := waitForDeleteParams{
+				ctx:                             ctx,
+				pods:                            []corev1.Pod{pod},
+				interval:                        1 * time.Second,
+				timeout:                         time.Duration(math.MaxInt64),
+				usingEviction:                   true,
+				getPodFn:                        getPodFn,
+				onDoneFn:                        d.OnPodDeletedOrEvicted,
+				globalTimeout:                   globalTimeout,
+				skipWaitForDeleteTimeoutSeconds: d.SkipWaitForDeleteTimeoutSeconds,
+				out:                             d.Out,
+			}
+			_, err := waitForDelete(params)
 			if err == nil {
 				returnCh <- nil
 			} else {
@@ -271,38 +339,64 @@ func (d *Helper) deletePods(pods []corev1.Pod, getPodFn func(namespace, name str
 			return err
 		}
 	}
-	ctx := context.TODO()
-	_, err := waitForDelete(ctx, pods, 1*time.Second, globalTimeout, false, getPodFn, d.OnPodDeletedOrEvicted, globalTimeout)
+	ctx := d.getContext()
+	params := waitForDeleteParams{
+		ctx:                             ctx,
+		pods:                            pods,
+		interval:                        1 * time.Second,
+		timeout:                         globalTimeout,
+		usingEviction:                   false,
+		getPodFn:                        getPodFn,
+		onDoneFn:                        d.OnPodDeletedOrEvicted,
+		globalTimeout:                   globalTimeout,
+		skipWaitForDeleteTimeoutSeconds: d.SkipWaitForDeleteTimeoutSeconds,
+		out:                             d.Out,
+	}
+	_, err := waitForDelete(params)
 	return err
 }
 
-func waitForDelete(ctx context.Context, pods []corev1.Pod, interval, timeout time.Duration, usingEviction bool, getPodFn func(string, string) (*corev1.Pod, error), onDoneFn func(pod *corev1.Pod, usingEviction bool), globalTimeout time.Duration) ([]corev1.Pod, error) {
-	err := wait.PollImmediate(interval, timeout, func() (bool, error) {
+func waitForDelete(params waitForDeleteParams) ([]corev1.Pod, error) {
+	pods := params.pods
+	err := wait.PollImmediate(params.interval, params.timeout, func() (bool, error) {
 		pendingPods := []corev1.Pod{}
 		for i, pod := range pods {
-			p, err := getPodFn(pod.Namespace, pod.Name)
+			p, err := params.getPodFn(pod.Namespace, pod.Name)
 			if apierrors.IsNotFound(err) || (p != nil && p.ObjectMeta.UID != pod.ObjectMeta.UID) {
-				if onDoneFn != nil {
-					onDoneFn(&pod, usingEviction)
+				if params.onDoneFn != nil {
+					params.onDoneFn(&pod, params.usingEviction)
 				}
 				continue
 			} else if err != nil {
 				return false, err
 			} else {
+				if shouldSkipPod(*p, params.skipWaitForDeleteTimeoutSeconds) {
+					fmt.Fprintf(params.out, podSkipMsgTemplate, pod.Name, params.skipWaitForDeleteTimeoutSeconds)
+					continue
+				}
 				pendingPods = append(pendingPods, pods[i])
 			}
 		}
 		pods = pendingPods
 		if len(pendingPods) > 0 {
 			select {
-			case <-ctx.Done():
-				return false, fmt.Errorf("global timeout reached: %v", globalTimeout)
+			case <-params.ctx.Done():
+				return false, fmt.Errorf("global timeout reached: %v", params.globalTimeout)
 			default:
 				return false, nil
 			}
-			return false, nil
 		}
 		return true, nil
 	})
 	return pods, err
 }
+
+// Since Helper does not have a constructor, we can't enforce Helper.Ctx != nil.
+// Multiple public methods also prevent us from initializing the context in a
+// single place.
+func (d *Helper) getContext() context.Context {
+	if d.Ctx != nil {
+		return d.Ctx
+	}
+	return context.Background()
+}
diff --git a/vendor/k8s.io/kubectl/pkg/drain/filters.go b/vendor/k8s.io/kubectl/pkg/drain/filters.go
index 2cbba245..1ffbbbe5 100644
--- a/vendor/k8s.io/kubectl/pkg/drain/filters.go
+++ b/vendor/k8s.io/kubectl/pkg/drain/filters.go
@@ -17,8 +17,10 @@ limitations under the License.
 package drain
 
 import (
+	"context"
 	"fmt"
 	"strings"
+	"time"
 
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
@@ -133,8 +135,11 @@ func makePodDeleteStatusWithError(message string) podDeleteStatus {
 	}
 }
 
+// The filters are applied in a specific order; only the last filter's
+// message will be retained if there are any warnings.
 func (d *Helper) makeFilters() []podFilter {
 	return []podFilter{
+		d.skipDeletedFilter,
 		d.daemonSetFilter,
 		d.mirrorPodFilter,
 		d.localStorageFilter,
@@ -168,7 +173,7 @@ func (d *Helper) daemonSetFilter(pod corev1.Pod) podDeleteStatus {
 		return makePodDeleteStatusOkay()
 	}
 
-	if _, err := d.Client.AppsV1().DaemonSets(pod.Namespace).Get(controllerRef.Name, metav1.GetOptions{}); err != nil {
+	if _, err := d.Client.AppsV1().DaemonSets(pod.Namespace).Get(context.TODO(), controllerRef.Name, metav1.GetOptions{}); err != nil {
 		// remove orphaned pods with a warning if --force is used
 		if apierrors.IsNotFound(err) && d.Force {
 			return makePodDeleteStatusWithWarning(true, err.Error())
@@ -203,6 +208,9 @@ func (d *Helper) localStorageFilter(pod corev1.Pod) podDeleteStatus {
 		return makePodDeleteStatusWithError(localStorageFatal)
 	}
 
+	// TODO: this warning gets dropped by subsequent filters;
+	// consider accounting for multiple warning conditions or at least
+	// preserving the last warning message.
 	return makePodDeleteStatusWithWarning(true, localStorageWarning)
 }
 
@@ -221,3 +229,16 @@ func (d *Helper) unreplicatedFilter(pod corev1.Pod) podDeleteStatus {
 	}
 	return makePodDeleteStatusWithError(unmanagedFatal)
 }
+
+func shouldSkipPod(pod corev1.Pod, skipDeletedTimeoutSeconds int) bool {
+	return skipDeletedTimeoutSeconds > 0 &&
+		!pod.ObjectMeta.DeletionTimestamp.IsZero() &&
+		int(time.Now().Sub(pod.ObjectMeta.GetDeletionTimestamp().Time).Seconds()) > skipDeletedTimeoutSeconds
+}
+
+func (d *Helper) skipDeletedFilter(pod corev1.Pod) podDeleteStatus {
+	if shouldSkipPod(pod, d.SkipWaitForDeleteTimeoutSeconds) {
+		return makePodDeleteStatusSkip()
+	}
+	return makePodDeleteStatusOkay()
+}
diff --git a/vendor/k8s.io/kubectl/pkg/scheme/install.go b/vendor/k8s.io/kubectl/pkg/scheme/install.go
new file mode 100644
index 00000000..ffd15bf1
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/scheme/install.go
@@ -0,0 +1,83 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package scheme
+
+import (
+	admissionv1 "k8s.io/api/admission/v1"
+	admissionv1beta1 "k8s.io/api/admission/v1beta1"
+	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
+	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
+	appsv1 "k8s.io/api/apps/v1"
+	appsv1beta1 "k8s.io/api/apps/v1beta1"
+	appsv1beta2 "k8s.io/api/apps/v1beta2"
+	authenticationv1 "k8s.io/api/authentication/v1"
+	authenticationv1beta1 "k8s.io/api/authentication/v1beta1"
+	authorizationv1 "k8s.io/api/authorization/v1"
+	authorizationv1beta1 "k8s.io/api/authorization/v1beta1"
+	autoscalingv1 "k8s.io/api/autoscaling/v1"
+	autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1"
+	batchv1 "k8s.io/api/batch/v1"
+	batchv1beta1 "k8s.io/api/batch/v1beta1"
+	batchv2alpha1 "k8s.io/api/batch/v2alpha1"
+	certificatesv1beta1 "k8s.io/api/certificates/v1beta1"
+	corev1 "k8s.io/api/core/v1"
+	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
+	imagepolicyv1alpha1 "k8s.io/api/imagepolicy/v1alpha1"
+	networkingv1 "k8s.io/api/networking/v1"
+	policyv1beta1 "k8s.io/api/policy/v1beta1"
+	rbacv1 "k8s.io/api/rbac/v1"
+	rbacv1alpha1 "k8s.io/api/rbac/v1alpha1"
+	rbacv1beta1 "k8s.io/api/rbac/v1beta1"
+	schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1"
+	settingsv1alpha1 "k8s.io/api/settings/v1alpha1"
+	storagev1 "k8s.io/api/storage/v1"
+	storagev1beta1 "k8s.io/api/storage/v1beta1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/client-go/kubernetes/scheme"
+)
+
+// Register all groups in kubectl's registry, but no componentconfig group since it's not in k8s.io/api.
+// The code in this file mostly duplicates the install under k8s.io/kubernetes/pkg/api and k8s.io/kubernetes/pkg/apis,
+// but does NOT register the internal types.
+func init() {
+	// Register external types for Scheme
+	metav1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
+	utilruntime.Must(metav1beta1.AddMetaToScheme(Scheme))
+	utilruntime.Must(metav1.AddMetaToScheme(Scheme))
+	utilruntime.Must(scheme.AddToScheme(Scheme))
+
+	utilruntime.Must(Scheme.SetVersionPriority(corev1.SchemeGroupVersion))
+	utilruntime.Must(Scheme.SetVersionPriority(admissionv1beta1.SchemeGroupVersion, admissionv1.SchemeGroupVersion))
+	utilruntime.Must(Scheme.SetVersionPriority(admissionregistrationv1beta1.SchemeGroupVersion, admissionregistrationv1.SchemeGroupVersion))
+	utilruntime.Must(Scheme.SetVersionPriority(appsv1beta1.SchemeGroupVersion, appsv1beta2.SchemeGroupVersion, appsv1.SchemeGroupVersion))
+	utilruntime.Must(Scheme.SetVersionPriority(authenticationv1.SchemeGroupVersion, authenticationv1beta1.SchemeGroupVersion))
+	utilruntime.Must(Scheme.SetVersionPriority(authorizationv1.SchemeGroupVersion, authorizationv1beta1.SchemeGroupVersion))
+	utilruntime.Must(Scheme.SetVersionPriority(autoscalingv1.SchemeGroupVersion, autoscalingv2beta1.SchemeGroupVersion))
+	utilruntime.Must(Scheme.SetVersionPriority(batchv1.SchemeGroupVersion, batchv1beta1.SchemeGroupVersion, batchv2alpha1.SchemeGroupVersion))
+	utilruntime.Must(Scheme.SetVersionPriority(certificatesv1beta1.SchemeGroupVersion))
+	utilruntime.Must(Scheme.SetVersionPriority(extensionsv1beta1.SchemeGroupVersion))
+	utilruntime.Must(Scheme.SetVersionPriority(imagepolicyv1alpha1.SchemeGroupVersion))
+	utilruntime.Must(Scheme.SetVersionPriority(networkingv1.SchemeGroupVersion))
+	utilruntime.Must(Scheme.SetVersionPriority(policyv1beta1.SchemeGroupVersion))
+	utilruntime.Must(Scheme.SetVersionPriority(rbacv1.SchemeGroupVersion, rbacv1beta1.SchemeGroupVersion, rbacv1alpha1.SchemeGroupVersion))
+	utilruntime.Must(Scheme.SetVersionPriority(schedulingv1alpha1.SchemeGroupVersion))
+	utilruntime.Must(Scheme.SetVersionPriority(settingsv1alpha1.SchemeGroupVersion))
+	utilruntime.Must(Scheme.SetVersionPriority(storagev1.SchemeGroupVersion, storagev1beta1.SchemeGroupVersion))
+}
diff --git a/vendor/k8s.io/kubectl/pkg/scheme/scheme.go b/vendor/k8s.io/kubectl/pkg/scheme/scheme.go
new file mode 100644
index 00000000..d1d7847b
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/scheme/scheme.go
@@ -0,0 +1,39 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package scheme
+
+import (
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
+)
+
+// All kubectl code should eventually switch to use this Registry and Scheme instead of the global ones.
+
+// Scheme is the default instance of runtime.Scheme to which types in the Kubernetes API are already registered.
+var Scheme = runtime.NewScheme()
+
+// Codecs provides access to encoding and decoding for the scheme
+var Codecs = serializer.NewCodecFactory(Scheme)
+
+// ParameterCodec handles versioning of objects that are converted to query parameters.
+var ParameterCodec = runtime.NewParameterCodec(Scheme)
+
+// DefaultJSONEncoder returns a default encoder for our scheme
+func DefaultJSONEncoder() runtime.Encoder {
+	return unstructured.NewJSONFallbackEncoder(Codecs.LegacyCodec(Scheme.PrioritizedVersionsAllGroups()...))
+}
diff --git a/vendor/k8s.io/kubectl/pkg/util/interrupt/interrupt.go b/vendor/k8s.io/kubectl/pkg/util/interrupt/interrupt.go
new file mode 100644
index 00000000..0265b9fb
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/util/interrupt/interrupt.go
@@ -0,0 +1,104 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package interrupt
+
+import (
+	"os"
+	"os/signal"
+	"sync"
+	"syscall"
+)
+
+// terminationSignals are signals that cause the program to exit on the
+// supported platforms (linux, darwin, windows).
+var terminationSignals = []os.Signal{syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT}
+
+// Handler guarantees execution of notifications after a critical section (the function passed
+// to a Run method), even in the presence of process termination. It guarantees exactly once
+// invocation of the provided notify functions.
+type Handler struct {
+	notify []func()
+	final  func(os.Signal)
+	once   sync.Once
+}
+
+// Chain creates a new handler that invokes all notify functions when the critical section exits
+// and then invokes the optional handler's notifications. This allows critical sections to be
+// nested without losing exactly once invocations. Notify functions can invoke any cleanup needed
+// but should not exit (which is the responsibility of the parent handler).
+func Chain(handler *Handler, notify ...func()) *Handler {
+	if handler == nil {
+		return New(nil, notify...)
+	}
+	return New(handler.Signal, append(notify, handler.Close)...)
+}
+
+// New creates a new handler that guarantees all notify functions are run after the critical
+// section exits (or is interrupted by the OS), then invokes the final handler. If no final
+// handler is specified, the default final is `os.Exit(1)`. A handler can only be used for
+// one critical section.
+func New(final func(os.Signal), notify ...func()) *Handler {
+	return &Handler{
+		final:  final,
+		notify: notify,
+	}
+}
+
+// Close executes all the notification handlers if they have not yet been executed.
+func (h *Handler) Close() {
+	h.once.Do(func() {
+		for _, fn := range h.notify {
+			fn()
+		}
+	})
+}
+
+// Signal is called when an os.Signal is received, and guarantees that all notifications
+// are executed, then the final handler is executed. This function should only be called once
+// per Handler instance.
+func (h *Handler) Signal(s os.Signal) {
+	h.once.Do(func() {
+		for _, fn := range h.notify {
+			fn()
+		}
+		if h.final == nil {
+			os.Exit(1)
+		}
+		h.final(s)
+	})
+}
+
+// Run ensures that any notifications are invoked after the provided fn exits (even if the
+// process is interrupted by an OS termination signal). Notifications are only invoked once
+// per Handler instance, so calling Run more than once will not behave as the user expects.
+func (h *Handler) Run(fn func() error) error {
+	ch := make(chan os.Signal, 1)
+	signal.Notify(ch, terminationSignals...)
+	defer func() {
+		signal.Stop(ch)
+		close(ch)
+	}()
+	go func() {
+		sig, ok := <-ch
+		if !ok {
+			return
+		}
+		h.Signal(sig)
+	}()
+	defer h.Close()
+	return fn()
+}
diff --git a/vendor/k8s.io/client-go/util/retry/OWNERS b/vendor/k8s.io/kubectl/pkg/util/openapi/OWNERS
similarity index 65%
rename from vendor/k8s.io/client-go/util/retry/OWNERS
rename to vendor/k8s.io/kubectl/pkg/util/openapi/OWNERS
index dec3e88d..99dabed0 100644
--- a/vendor/k8s.io/client-go/util/retry/OWNERS
+++ b/vendor/k8s.io/kubectl/pkg/util/openapi/OWNERS
@@ -1,4 +1,6 @@
 # See the OWNERS docs at https://go.k8s.io/owners
 
+approvers:
+- apelisse
 reviewers:
-- caesarxuchao
+- apelisse
diff --git a/vendor/k8s.io/kubectl/pkg/util/openapi/doc.go b/vendor/k8s.io/kubectl/pkg/util/openapi/doc.go
new file mode 100644
index 00000000..08194d58
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/util/openapi/doc.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package openapi is a collection of libraries for fetching the openapi spec
+// from a Kubernetes server and then indexing the type definitions.
+// The openapi spec contains the object model definitions and extensions metadata
+// such as the patchStrategy and patchMergeKey for creating patches.
+package openapi // k8s.io/kubectl/pkg/util/openapi
diff --git a/vendor/k8s.io/kubectl/pkg/util/openapi/extensions.go b/vendor/k8s.io/kubectl/pkg/util/openapi/extensions.go
new file mode 100644
index 00000000..f1b5cdd4
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/util/openapi/extensions.go
@@ -0,0 +1,27 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package openapi
+
+import "github.com/go-openapi/spec"
+
+// PrintColumnsKey is the key that defines which columns should be printed
+const PrintColumnsKey = "x-kubernetes-print-columns"
+
+// GetPrintColumns looks for the OpenAPI extension for the display columns.
+func GetPrintColumns(extensions spec.Extensions) (string, bool) {
+	return extensions.GetString(PrintColumnsKey)
+}
diff --git a/vendor/k8s.io/kubectl/pkg/util/openapi/openapi.go b/vendor/k8s.io/kubectl/pkg/util/openapi/openapi.go
new file mode 100644
index 00000000..c8f370b9
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/util/openapi/openapi.go
@@ -0,0 +1,128 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package openapi
+
+import (
+	openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2"
+
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/kube-openapi/pkg/util/proto"
+)
+
+// Resources describes a resource provider that can return a resource
+// schema based on a group-version-kind.
+type Resources interface {
+	LookupResource(gvk schema.GroupVersionKind) proto.Schema
+}
+
+// groupVersionKindExtensionKey is the key used to look up the
+// GroupVersionKind value for an object definition from the
+// definition's "extensions" map.
+const groupVersionKindExtensionKey = "x-kubernetes-group-version-kind"
+
+// document is an implementation of `Resources`. It looks for
+// resources in an openapi Schema.
+type document struct {
+	// Maps gvk to model name
+	resources map[schema.GroupVersionKind]string
+	models    proto.Models
+}
+
+var _ Resources = &document{}
+
+// NewOpenAPIData creates a new `Resources` out of the openapi document
+func NewOpenAPIData(doc *openapi_v2.Document) (Resources, error) {
+	models, err := proto.NewOpenAPIData(doc)
+	if err != nil {
+		return nil, err
+	}
+
+	resources := map[schema.GroupVersionKind]string{}
+	for _, modelName := range models.ListModels() {
+		model := models.LookupModel(modelName)
+		if model == nil {
+			panic("ListModels returns a model that can't be looked-up.")
+		}
+		gvkList := parseGroupVersionKind(model)
+		for _, gvk := range gvkList {
+			if len(gvk.Kind) > 0 {
+				resources[gvk] = modelName
+			}
+		}
+	}
+
+	return &document{
+		resources: resources,
+		models:    models,
+	}, nil
+}
+
+func (d *document) LookupResource(gvk schema.GroupVersionKind) proto.Schema {
+	modelName, found := d.resources[gvk]
+	if !found {
+		return nil
+	}
+	return d.models.LookupModel(modelName)
+}
+
+// Get and parse GroupVersionKind from the extension. Returns empty if it doesn't have one.
+func parseGroupVersionKind(s proto.Schema) []schema.GroupVersionKind {
+	extensions := s.GetExtensions()
+
+	gvkListResult := []schema.GroupVersionKind{}
+
+	// Get the extensions
+	gvkExtension, ok := extensions[groupVersionKindExtensionKey]
+	if !ok {
+		return []schema.GroupVersionKind{}
+	}
+
+	// gvk extension must be a list of at least 1 element.
+	gvkList, ok := gvkExtension.([]interface{})
+	if !ok {
+		return []schema.GroupVersionKind{}
+	}
+
+	for _, gvk := range gvkList {
+		// each element of the gvk extension list must be a map with
+		// group, version, and kind fields
+		gvkMap, ok := gvk.(map[interface{}]interface{})
+		if !ok {
+			continue
+		}
+		group, ok := gvkMap["group"].(string)
+		if !ok {
+			continue
+		}
+		version, ok := gvkMap["version"].(string)
+		if !ok {
+			continue
+		}
+		kind, ok := gvkMap["kind"].(string)
+		if !ok {
+			continue
+		}
+
+		gvkListResult = append(gvkListResult, schema.GroupVersionKind{
+			Group:   group,
+			Version: version,
+			Kind:    kind,
+		})
+	}
+
+	return gvkListResult
+}
diff --git a/vendor/k8s.io/kubectl/pkg/util/openapi/openapi_getter.go b/vendor/k8s.io/kubectl/pkg/util/openapi/openapi_getter.go
new file mode 100644
index 00000000..d5c9476a
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/util/openapi/openapi_getter.go
@@ -0,0 +1,65 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package openapi
+
+import (
+	"sync"
+
+	"k8s.io/client-go/discovery"
+)
+
+// synchronizedOpenAPIGetter fetches the openapi schema once and then caches it in memory
+type synchronizedOpenAPIGetter struct {
+	// Cached results
+	sync.Once
+	openAPISchema Resources
+	err           error
+
+	openAPIClient discovery.OpenAPISchemaInterface
+}
+
+var _ Getter = &synchronizedOpenAPIGetter{}
+
+// Getter is an interface for fetching openapi specs and parsing them into a Resources struct
+type Getter interface {
+	// OpenAPIData returns the parsed OpenAPIData
+	Get() (Resources, error)
+}
+
+// NewOpenAPIGetter returns a Getter that reads the OpenAPI schema from a
+// server and caches it in memory for subsequent invocations
+func NewOpenAPIGetter(openAPIClient discovery.OpenAPISchemaInterface) Getter {
+	return &synchronizedOpenAPIGetter{
+		openAPIClient: openAPIClient,
+	}
+}
+
+// Get implements Getter
+func (g *synchronizedOpenAPIGetter) Get() (Resources, error) {
+	g.Do(func() {
+		s, err := g.openAPIClient.OpenAPISchema()
+		if err != nil {
+			g.err = err
+			return
+		}
+
+		g.openAPISchema, g.err = NewOpenAPIData(s)
+	})
+
+	// Return the saved result
+	return g.openAPISchema, g.err
+}
diff --git a/vendor/k8s.io/kubectl/pkg/util/openapi/validation/validation.go b/vendor/k8s.io/kubectl/pkg/util/openapi/validation/validation.go
new file mode 100644
index 00000000..25aec97e
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/util/openapi/validation/validation.go
@@ -0,0 +1,140 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+	"errors"
+
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/apimachinery/pkg/util/json"
+	"k8s.io/apimachinery/pkg/util/yaml"
+	"k8s.io/kube-openapi/pkg/util/proto/validation"
+	"k8s.io/kubectl/pkg/util/openapi"
+)
+
+// SchemaValidation validates the object against an OpenAPI schema.
+type SchemaValidation struct {
+	resources openapi.Resources
+}
+
+// NewSchemaValidation creates a new SchemaValidation that can be used
+// to validate objects.
+func NewSchemaValidation(resources openapi.Resources) *SchemaValidation {
+	return &SchemaValidation{
+		resources: resources,
+	}
+}
+
+// ValidateBytes validates the object against the schemas in the
+// Resources object.
+func (v *SchemaValidation) ValidateBytes(data []byte) error {
+	obj, err := parse(data)
+	if err != nil {
+		return err
+	}
+
+	gvk, errs := getObjectKind(obj)
+	if errs != nil {
+		return utilerrors.NewAggregate(errs)
+	}
+
+	if (gvk == schema.GroupVersionKind{Version: "v1", Kind: "List"}) {
+		return utilerrors.NewAggregate(v.validateList(obj))
+	}
+
+	return utilerrors.NewAggregate(v.validateResource(obj, gvk))
+}
+
+func (v *SchemaValidation) validateList(object interface{}) []error {
+	fields, ok := object.(map[string]interface{})
+	if !ok || fields == nil {
+		return []error{errors.New("invalid object to validate")}
+	}
+
+	allErrors := []error{}
+	if _, ok := fields["items"].([]interface{}); !ok {
+		return []error{errors.New("invalid object to validate")}
+	}
+	for _, item := range fields["items"].([]interface{}) {
+		if gvk, errs := getObjectKind(item); errs != nil {
+			allErrors = append(allErrors, errs...)
+		} else {
+			allErrors = append(allErrors, v.validateResource(item, gvk)...)
+		}
+	}
+	return allErrors
+}
+
+func (v *SchemaValidation) validateResource(obj interface{}, gvk schema.GroupVersionKind) []error {
+	resource := v.resources.LookupResource(gvk)
+	if resource == nil {
+		// resource is not present, let's just skip validation.
+		return nil
+	}
+
+	return validation.ValidateModel(obj, resource, gvk.Kind)
+}
+
+func parse(data []byte) (interface{}, error) {
+	var obj interface{}
+	out, err := yaml.ToJSON(data)
+	if err != nil {
+		return nil, err
+	}
+	if err := json.Unmarshal(out, &obj); err != nil {
+		return nil, err
+	}
+	return obj, nil
+}
+
+func getObjectKind(object interface{}) (schema.GroupVersionKind, []error) {
+	var listErrors []error
+	fields, ok := object.(map[string]interface{})
+	if !ok || fields == nil {
+		listErrors = append(listErrors, errors.New("invalid object to validate"))
+		return schema.GroupVersionKind{}, listErrors
+	}
+
+	var group string
+	var version string
+	apiVersion := fields["apiVersion"]
+	if apiVersion == nil {
+		listErrors = append(listErrors, errors.New("apiVersion not set"))
+	} else if _, ok := apiVersion.(string); !ok {
+		listErrors = append(listErrors, errors.New("apiVersion isn't string type"))
+	} else {
+		gv, err := schema.ParseGroupVersion(apiVersion.(string))
+		if err != nil {
+			listErrors = append(listErrors, err)
+		} else {
+			group = gv.Group
+			version = gv.Version
+		}
+	}
+	kind := fields["kind"]
+	if kind == nil {
+		listErrors = append(listErrors, errors.New("kind not set"))
+	} else if _, ok := kind.(string); !ok {
+		listErrors = append(listErrors, errors.New("kind isn't string type"))
+	}
+	if listErrors != nil {
+		return schema.GroupVersionKind{}, listErrors
+	}
+
+	return schema.GroupVersionKind{Group: group, Version: version, Kind: kind.(string)}, nil
+}
diff --git a/vendor/k8s.io/kubectl/pkg/util/templates/command_groups.go b/vendor/k8s.io/kubectl/pkg/util/templates/command_groups.go
new file mode 100644
index 00000000..447a3962
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/util/templates/command_groups.go
@@ -0,0 +1,59 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package templates
+
+import (
+	"github.com/spf13/cobra"
+)
+
+type CommandGroup struct {
+	Message  string
+	Commands []*cobra.Command
+}
+
+type CommandGroups []CommandGroup
+
+func (g CommandGroups) Add(c *cobra.Command) {
+	for _, group := range g {
+		c.AddCommand(group.Commands...)
+	}
+}
+
+func (g CommandGroups) Has(c *cobra.Command) bool {
+	for _, group := range g {
+		for _, command := range group.Commands {
+			if command == c {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+func AddAdditionalCommands(g CommandGroups, message string, cmds []*cobra.Command) CommandGroups {
+	group := CommandGroup{Message: message}
+	for _, c := range cmds {
+		// Don't show commands that have no short description
+		if !g.Has(c) && len(c.Short) != 0 {
+			group.Commands = append(group.Commands, c)
+		}
+	}
+	if len(group.Commands) == 0 {
+		return g
+	}
+	return append(g, group)
+}
diff --git a/vendor/k8s.io/kubectl/pkg/util/templates/markdown.go b/vendor/k8s.io/kubectl/pkg/util/templates/markdown.go
new file mode 100644
index 00000000..65ee886d
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/util/templates/markdown.go
@@ -0,0 +1,147 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package templates
+
+import (
+	"bytes"
+	"fmt"
+	"strings"
+
+	"github.com/russross/blackfriday"
+)
+
+const linebreak = "\n"
+
+// ASCIIRenderer implements blackfriday.Renderer
+var _ blackfriday.Renderer = &ASCIIRenderer{}
+
+// ASCIIRenderer is a blackfriday.Renderer intended for rendering markdown
+// documents as plain text, well suited for human reading on terminals.
+type ASCIIRenderer struct {
+	Indentation string
+
+	listItemCount uint
+	listLevel     uint
+}
+
+// NormalText gets a text chunk *after* the markdown syntax was already
+// processed and does a final cleanup on things we don't expect here, like
+// removing linebreaks on things that are not a paragraph break (auto unwrap).
+func (r *ASCIIRenderer) NormalText(out *bytes.Buffer, text []byte) {
+	raw := string(text)
+	lines := strings.Split(raw, linebreak)
+	for _, line := range lines {
+		trimmed := strings.Trim(line, " \n\t")
+		if len(trimmed) > 0 && trimmed[0] != '_' {
+			out.WriteString(" ")
+		}
+		out.WriteString(trimmed)
+	}
+}
+
+// List renders the start and end of a list.
+func (r *ASCIIRenderer) List(out *bytes.Buffer, text func() bool, flags int) {
+	r.listLevel++
+	out.WriteString(linebreak)
+	text()
+	r.listLevel--
+}
+
+// ListItem renders list items and supports both ordered and unordered lists.
+func (r *ASCIIRenderer) ListItem(out *bytes.Buffer, text []byte, flags int) {
+	if flags&blackfriday.LIST_ITEM_BEGINNING_OF_LIST != 0 {
+		r.listItemCount = 1
+	} else {
+		r.listItemCount++
+	}
+	indent := strings.Repeat(r.Indentation, int(r.listLevel))
+	var bullet string
+	if flags&blackfriday.LIST_TYPE_ORDERED != 0 {
+		bullet += fmt.Sprintf("%d.", r.listItemCount)
+	} else {
+		bullet += "*"
+	}
+	out.WriteString(indent + bullet + " ")
+	r.fw(out, text)
+	out.WriteString(linebreak)
+}
+
+// Paragraph renders the start and end of a paragraph.
+func (r *ASCIIRenderer) Paragraph(out *bytes.Buffer, text func() bool) {
+	out.WriteString(linebreak)
+	text()
+	out.WriteString(linebreak)
+}
+
+// BlockCode renders a chunk of text that represents source code.
+func (r *ASCIIRenderer) BlockCode(out *bytes.Buffer, text []byte, lang string) {
+	out.WriteString(linebreak)
+	lines := []string{}
+	for _, line := range strings.Split(string(text), linebreak) {
+		indented := r.Indentation + line
+		lines = append(lines, indented)
+	}
+	out.WriteString(strings.Join(lines, linebreak))
+}
+
+func (r *ASCIIRenderer) GetFlags() int { return 0 }
+func (r *ASCIIRenderer) HRule(out *bytes.Buffer) {
+	out.WriteString(linebreak + "----------" + linebreak)
+}
+func (r *ASCIIRenderer) LineBreak(out *bytes.Buffer)                                      { out.WriteString(linebreak) }
+func (r *ASCIIRenderer) TitleBlock(out *bytes.Buffer, text []byte)                        { r.fw(out, text) }
+func (r *ASCIIRenderer) Header(out *bytes.Buffer, text func() bool, level int, id string) { text() }
+func (r *ASCIIRenderer) BlockHtml(out *bytes.Buffer, text []byte)                         { r.fw(out, text) }
+func (r *ASCIIRenderer) BlockQuote(out *bytes.Buffer, text []byte)                        { r.fw(out, text) }
+func (r *ASCIIRenderer) TableRow(out *bytes.Buffer, text []byte)                          { r.fw(out, text) }
+func (r *ASCIIRenderer) TableHeaderCell(out *bytes.Buffer, text []byte, align int)        { r.fw(out, text) }
+func (r *ASCIIRenderer) TableCell(out *bytes.Buffer, text []byte, align int)              { r.fw(out, text) }
+func (r *ASCIIRenderer) Footnotes(out *bytes.Buffer, text func() bool)                    { text() }
+func (r *ASCIIRenderer) FootnoteItem(out *bytes.Buffer, name, text []byte, flags int)     { r.fw(out, text) }
+func (r *ASCIIRenderer) AutoLink(out *bytes.Buffer, link []byte, kind int)                { r.fw(out, link) }
+func (r *ASCIIRenderer) CodeSpan(out *bytes.Buffer, text []byte)                          { r.fw(out, text) }
+func (r *ASCIIRenderer) DoubleEmphasis(out *bytes.Buffer, text []byte)                    { r.fw(out, text) }
+func (r *ASCIIRenderer) Emphasis(out *bytes.Buffer, text []byte)                          { r.fw(out, text) }
+func (r *ASCIIRenderer) RawHtmlTag(out *bytes.Buffer, text []byte)                        { r.fw(out, text) }
+func (r *ASCIIRenderer) TripleEmphasis(out *bytes.Buffer, text []byte)                    { r.fw(out, text) }
+func (r *ASCIIRenderer) StrikeThrough(out *bytes.Buffer, text []byte)                     { r.fw(out, text) }
+func (r *ASCIIRenderer) FootnoteRef(out *bytes.Buffer, ref []byte, id int)                { r.fw(out, ref) }
+func (r *ASCIIRenderer) Entity(out *bytes.Buffer, entity []byte)                          { r.fw(out, entity) }
+func (r *ASCIIRenderer) Smartypants(out *bytes.Buffer, text []byte)                       { r.fw(out, text) }
+func (r *ASCIIRenderer) DocumentHeader(out *bytes.Buffer)                                 {}
+func (r *ASCIIRenderer) DocumentFooter(out *bytes.Buffer)                                 {}
+func (r *ASCIIRenderer) TocHeaderWithAnchor(text []byte, level int, anchor string)        {}
+func (r *ASCIIRenderer) TocHeader(text []byte, level int)                                 {}
+func (r *ASCIIRenderer) TocFinalize()                                                     {}
+
+func (r *ASCIIRenderer) Table(out *bytes.Buffer, header []byte, body []byte, columnData []int) {
+	r.fw(out, header, body)
+}
+
+func (r *ASCIIRenderer) Link(out *bytes.Buffer, link []byte, title []byte, content []byte) {
+	r.fw(out, link)
+}
+
+func (r *ASCIIRenderer) Image(out *bytes.Buffer, link []byte, title []byte, alt []byte) {
+	r.fw(out, link)
+}
+
+func (r *ASCIIRenderer) fw(out *bytes.Buffer, text ...[]byte) {
+	for _, t := range text {
+		out.Write(t)
+	}
+}
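Not part of the vendored file, but as a reviewer aid: a minimal sketch of how this renderer is typically driven through the blackfriday v1 API (the same call normalizers.go makes in this patch); the sample markdown text is made up for illustration.

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday"
	"k8s.io/kubectl/pkg/util/templates"
)

func main() {
	src := []byte("Deploys the app.\n\n* reads the manifest\n* applies it to the cluster\n")

	// Render markdown to terminal-friendly plain text using the
	// ASCIIRenderer defined above (blackfriday v1 entry point).
	out := blackfriday.Markdown(src,
		&templates.ASCIIRenderer{Indentation: "  "},
		blackfriday.EXTENSION_NO_INTRA_EMPHASIS)
	fmt.Println(string(out))
}
```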
diff --git a/vendor/k8s.io/kubectl/pkg/util/templates/normalizers.go b/vendor/k8s.io/kubectl/pkg/util/templates/normalizers.go
new file mode 100644
index 00000000..f9041205
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/util/templates/normalizers.go
@@ -0,0 +1,97 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package templates
+
+import (
+	"strings"
+
+	"github.com/MakeNowJust/heredoc"
+	"github.com/russross/blackfriday"
+	"github.com/spf13/cobra"
+)
+
+const Indentation = `  `
+
+// LongDesc normalizes a command's long description to follow the conventions.
+func LongDesc(s string) string {
+	if len(s) == 0 {
+		return s
+	}
+	return normalizer{s}.heredoc().markdown().trim().string
+}
+
+// Examples normalizes a command's examples to follow the conventions.
+func Examples(s string) string {
+	if len(s) == 0 {
+		return s
+	}
+	return normalizer{s}.trim().indent().string
+}
+
+// Normalize performs all required normalizations on a given command.
+func Normalize(cmd *cobra.Command) *cobra.Command {
+	if len(cmd.Long) > 0 {
+		cmd.Long = LongDesc(cmd.Long)
+	}
+	if len(cmd.Example) > 0 {
+		cmd.Example = Examples(cmd.Example)
+	}
+	return cmd
+}
+
+// NormalizeAll performs all required normalizations in the entire command tree.
+func NormalizeAll(cmd *cobra.Command) *cobra.Command {
+	if cmd.HasSubCommands() {
+		for _, subCmd := range cmd.Commands() {
+			NormalizeAll(subCmd)
+		}
+	}
+	Normalize(cmd)
+	return cmd
+}
+
+type normalizer struct {
+	string
+}
+
+func (s normalizer) markdown() normalizer {
+	bytes := []byte(s.string)
+	formatted := blackfriday.Markdown(bytes, &ASCIIRenderer{Indentation: Indentation}, blackfriday.EXTENSION_NO_INTRA_EMPHASIS)
+	s.string = string(formatted)
+	return s
+}
+
+func (s normalizer) heredoc() normalizer {
+	s.string = heredoc.Doc(s.string)
+	return s
+}
+
+func (s normalizer) trim() normalizer {
+	s.string = strings.TrimSpace(s.string)
+	return s
+}
+
+func (s normalizer) indent() normalizer {
+	indentedLines := []string{}
+	for _, line := range strings.Split(s.string, "\n") {
+		trimmed := strings.TrimSpace(line)
+		indented := Indentation + trimmed
+		indentedLines = append(indentedLines, indented)
+	}
+	s.string = strings.Join(indentedLines, "\n")
+	return s
+}
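As a usage sketch only (not in the patch): how LongDesc, Examples, and Normalize are typically wired into a cobra command. The "mycli"/"deploy" names and the help text are assumptions for the example.

```go
package main

import (
	"github.com/spf13/cobra"
	"k8s.io/kubectl/pkg/util/templates"
)

func newDeployCmd() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "deploy",
		Short: "Deploy an application",
		// LongDesc strips the heredoc indentation and unwraps the markdown.
		Long: templates.LongDesc(`
			Deploy an application to the cluster.

			The manifest is read from the file passed with -f.`),
		// Examples trims the block and re-indents every line consistently.
		Example: templates.Examples(`
			# Deploy from a file
			mycli deploy -f app.yaml`),
		RunE: func(cmd *cobra.Command, args []string) error { return nil },
	}
	return templates.Normalize(cmd)
}

func main() {
	root := &cobra.Command{Use: "mycli"}
	root.AddCommand(newDeployCmd())
	_ = root.Execute()
}
```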
diff --git a/vendor/k8s.io/kubectl/pkg/util/templates/templater.go b/vendor/k8s.io/kubectl/pkg/util/templates/templater.go
new file mode 100644
index 00000000..c8036ac1
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/util/templates/templater.go
@@ -0,0 +1,298 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package templates
+
+import (
+	"bytes"
+	"fmt"
+	"strings"
+	"text/template"
+	"unicode"
+
+	"k8s.io/kubectl/pkg/util/term"
+
+	"github.com/spf13/cobra"
+	flag "github.com/spf13/pflag"
+)
+
+type FlagExposer interface {
+	ExposeFlags(cmd *cobra.Command, flags ...string) FlagExposer
+}
+
+func ActsAsRootCommand(cmd *cobra.Command, filters []string, groups ...CommandGroup) FlagExposer {
+	if cmd == nil {
+		panic("nil root command")
+	}
+	templater := &templater{
+		RootCmd:       cmd,
+		UsageTemplate: MainUsageTemplate(),
+		HelpTemplate:  MainHelpTemplate(),
+		CommandGroups: groups,
+		Filtered:      filters,
+	}
+	cmd.SetFlagErrorFunc(templater.FlagErrorFunc())
+	cmd.SilenceUsage = true
+	cmd.SetUsageFunc(templater.UsageFunc())
+	cmd.SetHelpFunc(templater.HelpFunc())
+	return templater
+}
+
+func UseOptionsTemplates(cmd *cobra.Command) {
+	templater := &templater{
+		UsageTemplate: OptionsUsageTemplate(),
+		HelpTemplate:  OptionsHelpTemplate(),
+	}
+	cmd.SetUsageFunc(templater.UsageFunc())
+	cmd.SetHelpFunc(templater.HelpFunc())
+}
+
+type templater struct {
+	UsageTemplate string
+	HelpTemplate  string
+	RootCmd       *cobra.Command
+	CommandGroups
+	Filtered []string
+}
+
+func (templater *templater) FlagErrorFunc(exposedFlags ...string) func(*cobra.Command, error) error {
+	return func(c *cobra.Command, err error) error {
+		c.SilenceUsage = true
+		switch c.CalledAs() {
+		case "options":
+			return fmt.Errorf("%s\nRun '%s' without flags.", err, c.CommandPath())
+		default:
+			return fmt.Errorf("%s\nSee '%s --help' for usage.", err, c.CommandPath())
+		}
+	}
+}
+
+func (templater *templater) ExposeFlags(cmd *cobra.Command, flags ...string) FlagExposer {
+	cmd.SetUsageFunc(templater.UsageFunc(flags...))
+	return templater
+}
+
+func (templater *templater) HelpFunc() func(*cobra.Command, []string) {
+	return func(c *cobra.Command, s []string) {
+		t := template.New("help")
+		t.Funcs(templater.templateFuncs())
+		template.Must(t.Parse(templater.HelpTemplate))
+		out := term.NewResponsiveWriter(c.OutOrStdout())
+		err := t.Execute(out, c)
+		if err != nil {
+			c.Println(err)
+		}
+	}
+}
+
+func (templater *templater) UsageFunc(exposedFlags ...string) func(*cobra.Command) error {
+	return func(c *cobra.Command) error {
+		t := template.New("usage")
+		t.Funcs(templater.templateFuncs(exposedFlags...))
+		template.Must(t.Parse(templater.UsageTemplate))
+		out := term.NewResponsiveWriter(c.OutOrStderr())
+		return t.Execute(out, c)
+	}
+}
+
+func (templater *templater) templateFuncs(exposedFlags ...string) template.FuncMap {
+	return template.FuncMap{
+		"trim":                strings.TrimSpace,
+		"trimRight":           func(s string) string { return strings.TrimRightFunc(s, unicode.IsSpace) },
+		"trimLeft":            func(s string) string { return strings.TrimLeftFunc(s, unicode.IsSpace) },
+		"gt":                  cobra.Gt,
+		"eq":                  cobra.Eq,
+		"rpad":                rpad,
+		"appendIfNotPresent":  appendIfNotPresent,
+		"flagsNotIntersected": flagsNotIntersected,
+		"visibleFlags":        visibleFlags,
+		"flagsUsages":         flagsUsages,
+		"cmdGroups":           templater.cmdGroups,
+		"cmdGroupsString":     templater.cmdGroupsString,
+		"rootCmd":             templater.rootCmdName,
+		"isRootCmd":           templater.isRootCmd,
+		"optionsCmdFor":       templater.optionsCmdFor,
+		"usageLine":           templater.usageLine,
+		"exposed": func(c *cobra.Command) *flag.FlagSet {
+			exposed := flag.NewFlagSet("exposed", flag.ContinueOnError)
+			if len(exposedFlags) > 0 {
+				for _, name := range exposedFlags {
+					if flag := c.Flags().Lookup(name); flag != nil {
+						exposed.AddFlag(flag)
+					}
+				}
+			}
+			return exposed
+		},
+	}
+}
+
+func (templater *templater) cmdGroups(c *cobra.Command, all []*cobra.Command) []CommandGroup {
+	if len(templater.CommandGroups) > 0 && c == templater.RootCmd {
+		all = filter(all, templater.Filtered...)
+		return AddAdditionalCommands(templater.CommandGroups, "Other Commands:", all)
+	}
+	all = filter(all, "options")
+	return []CommandGroup{
+		{
+			Message:  "Available Commands:",
+			Commands: all,
+		},
+	}
+}
+
+func (t *templater) cmdGroupsString(c *cobra.Command) string {
+	groups := []string{}
+	for _, cmdGroup := range t.cmdGroups(c, c.Commands()) {
+		cmds := []string{cmdGroup.Message}
+		for _, cmd := range cmdGroup.Commands {
+			if cmd.IsAvailableCommand() {
+				cmds = append(cmds, "  "+rpad(cmd.Name(), cmd.NamePadding())+" "+cmd.Short)
+			}
+		}
+		groups = append(groups, strings.Join(cmds, "\n"))
+	}
+	return strings.Join(groups, "\n\n")
+}
+
+func (t *templater) rootCmdName(c *cobra.Command) string {
+	return t.rootCmd(c).CommandPath()
+}
+
+func (t *templater) isRootCmd(c *cobra.Command) bool {
+	return t.rootCmd(c) == c
+}
+
+func (t *templater) parents(c *cobra.Command) []*cobra.Command {
+	parents := []*cobra.Command{c}
+	for current := c; !t.isRootCmd(current) && current.HasParent(); {
+		current = current.Parent()
+		parents = append(parents, current)
+	}
+	return parents
+}
+
+func (t *templater) rootCmd(c *cobra.Command) *cobra.Command {
+	if c != nil && !c.HasParent() {
+		return c
+	}
+	if t.RootCmd == nil {
+		panic("nil root cmd")
+	}
+	return t.RootCmd
+}
+
+func (t *templater) optionsCmdFor(c *cobra.Command) string {
+	if !c.Runnable() {
+		return ""
+	}
+	rootCmdStructure := t.parents(c)
+	for i := len(rootCmdStructure) - 1; i >= 0; i-- {
+		cmd := rootCmdStructure[i]
+		if _, _, err := cmd.Find([]string{"options"}); err == nil {
+			return cmd.CommandPath() + " options"
+		}
+	}
+	return ""
+}
+
+func (t *templater) usageLine(c *cobra.Command) string {
+	usage := c.UseLine()
+	suffix := "[options]"
+	if c.HasFlags() && !strings.Contains(usage, suffix) {
+		usage += " " + suffix
+	}
+	return usage
+}
+
+func flagsUsages(f *flag.FlagSet) string {
+	x := new(bytes.Buffer)
+
+	f.VisitAll(func(flag *flag.Flag) {
+		if flag.Hidden {
+			return
+		}
+		format := "--%s=%s: %s\n"
+
+		if flag.Value.Type() == "string" {
+			format = "--%s='%s': %s\n"
+		}
+
+		if len(flag.Shorthand) > 0 {
+			format = "  -%s, " + format
+		} else {
+			format = "   %s   " + format
+		}
+
+		fmt.Fprintf(x, format, flag.Shorthand, flag.Name, flag.DefValue, flag.Usage)
+	})
+
+	return x.String()
+}
+
+func rpad(s string, padding int) string {
+	template := fmt.Sprintf("%%-%ds", padding)
+	return fmt.Sprintf(template, s)
+}
+
+func appendIfNotPresent(s, stringToAppend string) string {
+	if strings.Contains(s, stringToAppend) {
+		return s
+	}
+	return s + " " + stringToAppend
+}
+
+func flagsNotIntersected(l *flag.FlagSet, r *flag.FlagSet) *flag.FlagSet {
+	f := flag.NewFlagSet("notIntersected", flag.ContinueOnError)
+	l.VisitAll(func(flag *flag.Flag) {
+		if r.Lookup(flag.Name) == nil {
+			f.AddFlag(flag)
+		}
+	})
+	return f
+}
+
+func visibleFlags(l *flag.FlagSet) *flag.FlagSet {
+	hidden := "help"
+	f := flag.NewFlagSet("visible", flag.ContinueOnError)
+	l.VisitAll(func(flag *flag.Flag) {
+		if flag.Name != hidden {
+			f.AddFlag(flag)
+		}
+	})
+	return f
+}
+
+func filter(cmds []*cobra.Command, names ...string) []*cobra.Command {
+	out := []*cobra.Command{}
+	for _, c := range cmds {
+		if c.Hidden {
+			continue
+		}
+		skip := false
+		for _, name := range names {
+			if name == c.Name() {
+				skip = true
+				break
+			}
+		}
+		if skip {
+			continue
+		}
+		out = append(out, c)
+	}
+	return out
+}
diff --git a/vendor/k8s.io/kubectl/pkg/util/templates/templates.go b/vendor/k8s.io/kubectl/pkg/util/templates/templates.go
new file mode 100644
index 00000000..9f3b75b5
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/util/templates/templates.go
@@ -0,0 +1,103 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package templates
+
+import (
+	"strings"
+	"unicode"
+)
+
+const (
+	// SectionVars is the help template section that declares variables to be used in the template.
+	SectionVars = `{{$isRootCmd := isRootCmd .}}` +
+		`{{$rootCmd := rootCmd .}}` +
+		`{{$visibleFlags := visibleFlags (flagsNotIntersected .LocalFlags .PersistentFlags)}}` +
+		`{{$explicitlyExposedFlags := exposed .}}` +
+		`{{$optionsCmdFor := optionsCmdFor .}}` +
+		`{{$usageLine := usageLine .}}`
+
+	// SectionAliases is the help template section that displays command aliases.
+	SectionAliases = `{{if gt .Aliases 0}}Aliases:
+{{.NameAndAliases}}
+
+{{end}}`
+
+	// SectionExamples is the help template section that displays command examples.
+	SectionExamples = `{{if .HasExample}}Examples:
+{{trimRight .Example}}
+
+{{end}}`
+
+	// SectionSubcommands is the help template section that displays the command's subcommands.
+	SectionSubcommands = `{{if .HasAvailableSubCommands}}{{cmdGroupsString .}}
+
+{{end}}`
+
+	// SectionFlags is the help template section that displays the command's flags.
+	SectionFlags = `{{ if or $visibleFlags.HasFlags $explicitlyExposedFlags.HasFlags}}Options:
+{{ if $visibleFlags.HasFlags}}{{trimRight (flagsUsages $visibleFlags)}}{{end}}{{ if $explicitlyExposedFlags.HasFlags}}{{ if $visibleFlags.HasFlags}}
+{{end}}{{trimRight (flagsUsages $explicitlyExposedFlags)}}{{end}}
+
+{{end}}`
+
+	// SectionUsage is the help template section that displays the command's usage.
+	SectionUsage = `{{if and .Runnable (ne .UseLine "") (ne .UseLine $rootCmd)}}Usage:
+  {{$usageLine}}
+
+{{end}}`
+
+	// SectionTipsHelp is the help template section that displays the '--help' hint.
+	SectionTipsHelp = `{{if .HasSubCommands}}Use "{{$rootCmd}} <command> --help" for more information about a given command.
+{{end}}`
+
+	// SectionTipsGlobalOptions is the help template section that displays the 'options' hint for displaying global flags.
+	SectionTipsGlobalOptions = `{{if $optionsCmdFor}}Use "{{$optionsCmdFor}}" for a list of global command-line options (applies to all commands).
+{{end}}`
+)
+
+// MainHelpTemplate is the template for 'help' used by most commands.
+func MainHelpTemplate() string {
+	return `{{with or .Long .Short }}{{. | trim}}{{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}`
+}
+
+// MainUsageTemplate is the template for 'usage' used by most commands.
+func MainUsageTemplate() string {
+	sections := []string{
+		"\n\n",
+		SectionVars,
+		SectionAliases,
+		SectionExamples,
+		SectionSubcommands,
+		SectionFlags,
+		SectionUsage,
+		SectionTipsHelp,
+		SectionTipsGlobalOptions,
+	}
+	return strings.TrimRightFunc(strings.Join(sections, ""), unicode.IsSpace)
+}
+
+// OptionsHelpTemplate is the template for 'help' used by the 'options' command.
+func OptionsHelpTemplate() string {
+	return ""
+}
+
+// OptionsUsageTemplate is the template for 'usage' used by the 'options' command.
+func OptionsUsageTemplate() string {
+	return `{{ if .HasInheritedFlags}}The following options can be passed to any command:
+
+{{flagsUsages .InheritedFlags}}{{end}}`
+}
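For context rather than as part of the patch: a hedged sketch of combining ActsAsRootCommand and CommandGroup from this package on a root command; the command names and the group label are illustrative assumptions.

```go
package main

import (
	"github.com/spf13/cobra"
	"k8s.io/kubectl/pkg/util/templates"
)

func main() {
	root := &cobra.Command{Use: "mycli"}
	get := &cobra.Command{Use: "get", Short: "Display resources", Run: func(*cobra.Command, []string) {}}
	apply := &cobra.Command{Use: "apply", Short: "Apply a configuration", Run: func(*cobra.Command, []string) {}}
	root.AddCommand(get, apply)

	groups := []templates.CommandGroup{
		{Message: "Basic Commands:", Commands: []*cobra.Command{get, apply}},
	}

	// Install the grouped help/usage templates on the root command; the
	// "options" name is filtered out of the subcommand listing.
	templates.ActsAsRootCommand(root, []string{"options"}, groups...)

	_ = root.Execute()
}
```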
diff --git a/vendor/k8s.io/kubectl/pkg/util/term/resize.go b/vendor/k8s.io/kubectl/pkg/util/term/resize.go
new file mode 100644
index 00000000..7ca09a85
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/util/term/resize.go
@@ -0,0 +1,132 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package term
+
+import (
+	"fmt"
+
+	"github.com/docker/docker/pkg/term"
+	"k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/client-go/tools/remotecommand"
+)
+
+// GetSize returns the current size of the user's terminal. If it isn't a terminal,
+// nil is returned.
+func (t TTY) GetSize() *remotecommand.TerminalSize {
+	outFd, isTerminal := term.GetFdInfo(t.Out)
+	if !isTerminal {
+		return nil
+	}
+	return GetSize(outFd)
+}
+
+// GetSize returns the current size of the terminal associated with fd.
+func GetSize(fd uintptr) *remotecommand.TerminalSize {
+	winsize, err := term.GetWinsize(fd)
+	if err != nil {
+		runtime.HandleError(fmt.Errorf("unable to get terminal size: %v", err))
+		return nil
+	}
+
+	return &remotecommand.TerminalSize{Width: winsize.Width, Height: winsize.Height}
+}
+
+// MonitorSize monitors the terminal's size. It returns a TerminalSizeQueue primed with
+// initialSizes, or nil if there's no TTY present.
+func (t *TTY) MonitorSize(initialSizes ...*remotecommand.TerminalSize) remotecommand.TerminalSizeQueue {
+	outFd, isTerminal := term.GetFdInfo(t.Out)
+	if !isTerminal {
+		return nil
+	}
+
+	t.sizeQueue = &sizeQueue{
+		t: *t,
+		// make it buffered so we can send the initial terminal sizes without blocking, prior to starting
+		// the streaming below
+		resizeChan:   make(chan remotecommand.TerminalSize, len(initialSizes)),
+		stopResizing: make(chan struct{}),
+	}
+
+	t.sizeQueue.monitorSize(outFd, initialSizes...)
+
+	return t.sizeQueue
+}
+
+// sizeQueue implements remotecommand.TerminalSizeQueue
+type sizeQueue struct {
+	t TTY
+	// resizeChan receives a Size each time the user's terminal is resized.
+	resizeChan   chan remotecommand.TerminalSize
+	stopResizing chan struct{}
+}
+
+// make sure sizeQueue implements the resize.TerminalSizeQueue interface
+var _ remotecommand.TerminalSizeQueue = &sizeQueue{}
+
+// monitorSize primes resizeChan with initialSizes and then monitors for resize events. With each
+// new event, it sends the current terminal size to resizeChan.
+func (s *sizeQueue) monitorSize(outFd uintptr, initialSizes ...*remotecommand.TerminalSize) {
+	// send the initial sizes
+	for i := range initialSizes {
+		if initialSizes[i] != nil {
+			s.resizeChan <- *initialSizes[i]
+		}
+	}
+
+	resizeEvents := make(chan remotecommand.TerminalSize, 1)
+
+	monitorResizeEvents(outFd, resizeEvents, s.stopResizing)
+
+	// listen for resize events in the background
+	go func() {
+		defer runtime.HandleCrash()
+
+		for {
+			select {
+			case size, ok := <-resizeEvents:
+				if !ok {
+					return
+				}
+
+				select {
+				// try to send the size to resizeChan, but don't block
+				case s.resizeChan <- size:
+					// send successful
+				default:
+					// unable to send / no-op
+				}
+			case <-s.stopResizing:
+				return
+			}
+		}
+	}()
+}
+
+// Next returns the new terminal size after the terminal has been resized. It returns nil when
+// monitoring has been stopped.
+func (s *sizeQueue) Next() *remotecommand.TerminalSize {
+	size, ok := <-s.resizeChan
+	if !ok {
+		return nil
+	}
+	return &size
+}
+
+// stop stops the background goroutine that is monitoring for terminal resizes.
+func (s *sizeQueue) stop() {
+	close(s.stopResizing)
+}
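As a small illustration outside the patch, calling the exported GetSize helper directly on stdout (it returns nil when output is piped or redirected):

```go
package main

import (
	"fmt"
	"os"

	"k8s.io/kubectl/pkg/util/term"
)

func main() {
	// GetSize returns nil when stdout is not a terminal, e.g. when piped.
	if size := term.GetSize(os.Stdout.Fd()); size != nil {
		fmt.Printf("terminal is %d columns by %d rows\n", size.Width, size.Height)
	} else {
		fmt.Println("stdout is not a terminal")
	}
}
```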
diff --git a/vendor/k8s.io/kubectl/pkg/util/term/resizeevents.go b/vendor/k8s.io/kubectl/pkg/util/term/resizeevents.go
new file mode 100644
index 00000000..e3476f97
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/util/term/resizeevents.go
@@ -0,0 +1,61 @@
+// +build !windows
+
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package term
+
+import (
+	"os"
+	"os/signal"
+
+	"golang.org/x/sys/unix"
+	"k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/client-go/tools/remotecommand"
+)
+
+// monitorResizeEvents spawns a goroutine that waits for SIGWINCH signals (these indicate the
+// terminal has resized). After receiving a SIGWINCH, this gets the terminal size and tries to send
+// it to the resizeEvents channel. The goroutine stops when the stop channel is closed.
+func monitorResizeEvents(fd uintptr, resizeEvents chan<- remotecommand.TerminalSize, stop chan struct{}) {
+	go func() {
+		defer runtime.HandleCrash()
+
+		winch := make(chan os.Signal, 1)
+		signal.Notify(winch, unix.SIGWINCH)
+		defer signal.Stop(winch)
+
+		for {
+			select {
+			case <-winch:
+				size := GetSize(fd)
+				if size == nil {
+					return
+				}
+
+				// try to send size
+				select {
+				case resizeEvents <- *size:
+					// success
+				default:
+					// not sent
+				}
+			case <-stop:
+				return
+			}
+		}
+	}()
+}
diff --git a/vendor/k8s.io/kubectl/pkg/util/term/resizeevents_windows.go b/vendor/k8s.io/kubectl/pkg/util/term/resizeevents_windows.go
new file mode 100644
index 00000000..adccf873
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/util/term/resizeevents_windows.go
@@ -0,0 +1,62 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package term
+
+import (
+	"time"
+
+	"k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/client-go/tools/remotecommand"
+)
+
+// monitorResizeEvents spawns a goroutine that periodically gets the terminal size and tries to send
+// it to the resizeEvents channel if the size has changed. The goroutine stops when the stop channel
+// is closed.
+func monitorResizeEvents(fd uintptr, resizeEvents chan<- remotecommand.TerminalSize, stop chan struct{}) {
+	go func() {
+		defer runtime.HandleCrash()
+
+		size := GetSize(fd)
+		if size == nil {
+			return
+		}
+		lastSize := *size
+
+		for {
+			// see if we need to stop running
+			select {
+			case <-stop:
+				return
+			default:
+			}
+
+			size := GetSize(fd)
+			if size == nil {
+				return
+			}
+
+			if size.Height != lastSize.Height || size.Width != lastSize.Width {
+				lastSize.Height = size.Height
+				lastSize.Width = size.Width
+				resizeEvents <- *size
+			}
+
+			// sleep to avoid hot looping
+			time.Sleep(250 * time.Millisecond)
+		}
+	}()
+}
diff --git a/vendor/k8s.io/kubectl/pkg/util/term/term.go b/vendor/k8s.io/kubectl/pkg/util/term/term.go
new file mode 100644
index 00000000..18183c0c
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/util/term/term.go
@@ -0,0 +1,110 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package term
+
+import (
+	"io"
+	"os"
+
+	"github.com/docker/docker/pkg/term"
+
+	"k8s.io/kubectl/pkg/util/interrupt"
+)
+
+// SafeFunc is a function to be invoked by TTY.
+type SafeFunc func() error
+
+// TTY helps invoke a function and preserve the state of the terminal, even if the process is
+// terminated during execution. It also provides support for terminal resizing for remote command
+// execution/attachment.
+type TTY struct {
+	// In is a reader representing stdin. It is a required field.
+	In io.Reader
+	// Out is a writer representing stdout. It must be set to support terminal resizing. It is an
+	// optional field.
+	Out io.Writer
+	// Raw is true if the terminal should be set raw.
+	Raw bool
+	// TryDev indicates the TTY should try to open /dev/tty if the provided input
+	// is not a file descriptor.
+	TryDev bool
+	// Parent is an optional interrupt handler provided to this function - if provided
+	// it will be invoked after the terminal state is restored. If it is not provided,
+	// a signal received during the TTY will result in os.Exit(0) being invoked.
+	Parent *interrupt.Handler
+
+	// sizeQueue is set after a call to MonitorSize() and is used to monitor SIGWINCH signals when the
+	// user's terminal resizes.
+	sizeQueue *sizeQueue
+}
+
+// IsTerminalIn returns true if t.In is a terminal. Does not check /dev/tty
+// even if TryDev is set.
+func (t TTY) IsTerminalIn() bool {
+	return IsTerminal(t.In)
+}
+
+// IsTerminalOut returns true if t.Out is a terminal. Does not check /dev/tty
+// even if TryDev is set.
+func (t TTY) IsTerminalOut() bool {
+	return IsTerminal(t.Out)
+}
+
+// IsTerminal returns whether the passed object is a terminal.
+func IsTerminal(i interface{}) bool {
+	_, terminal := term.GetFdInfo(i)
+	return terminal
+}
+
+// Safe invokes the provided function and will attempt to ensure that when the
+// function returns (or a termination signal is sent) that the terminal state
+// is reset to the condition it was in prior to the function being invoked. If
+// t.Raw is true the terminal will be put into raw mode prior to calling the function.
+// If the input file descriptor is not a TTY and TryDev is true, the /dev/tty file
+// will be opened (if available).
+func (t TTY) Safe(fn SafeFunc) error {
+	inFd, isTerminal := term.GetFdInfo(t.In)
+
+	if !isTerminal && t.TryDev {
+		if f, err := os.Open("/dev/tty"); err == nil {
+			defer f.Close()
+			inFd = f.Fd()
+			isTerminal = term.IsTerminal(inFd)
+		}
+	}
+	if !isTerminal {
+		return fn()
+	}
+
+	var state *term.State
+	var err error
+	if t.Raw {
+		state, err = term.MakeRaw(inFd)
+	} else {
+		state, err = term.SaveState(inFd)
+	}
+	if err != nil {
+		return err
+	}
+	return interrupt.Chain(t.Parent, func() {
+		if t.sizeQueue != nil {
+			t.sizeQueue.stop()
+		}
+
+		term.RestoreTerminal(inFd, state)
+	}).Run(fn)
+}
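A minimal sketch (not part of the vendored code) of the usual TTY.Safe pattern, assuming stdin is an interactive terminal:

```go
package main

import (
	"fmt"
	"os"

	"k8s.io/kubectl/pkg/util/term"
)

func main() {
	t := term.TTY{In: os.Stdin, Out: os.Stdout, Raw: true}

	// Safe puts the terminal into raw mode (because Raw is true), runs the
	// function, and restores the previous terminal state afterwards, even
	// if an interrupt arrives while the function is still running.
	err := t.Safe(func() error {
		fmt.Println("terminal is in raw mode here")
		return nil
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```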
diff --git a/vendor/k8s.io/kubectl/pkg/util/term/term_writer.go b/vendor/k8s.io/kubectl/pkg/util/term/term_writer.go
new file mode 100644
index 00000000..2d72d1e4
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/util/term/term_writer.go
@@ -0,0 +1,124 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package term
+
+import (
+	"io"
+	"os"
+
+	"github.com/docker/docker/pkg/term"
+	wordwrap "github.com/mitchellh/go-wordwrap"
+)
+
+type wordWrapWriter struct {
+	limit  uint
+	writer io.Writer
+}
+
+// NewResponsiveWriter creates a Writer that detects the column width of the
+// terminal we are in, and adjusts every line width to fit and use recommended
+// terminal sizes for better readability. Does proper word wrapping automatically.
+//    if terminal width >= 120 columns		use 120 columns
+//    if terminal width >= 100 columns		use 100 columns
+//    if terminal width >=  80 columns		use  80 columns
+// In case we're not in a terminal or if it's smaller than 80 columns width,
+// doesn't do any wrapping.
+func NewResponsiveWriter(w io.Writer) io.Writer {
+	file, ok := w.(*os.File)
+	if !ok {
+		return w
+	}
+	fd := file.Fd()
+	if !term.IsTerminal(fd) {
+		return w
+	}
+
+	terminalSize := GetSize(fd)
+	if terminalSize == nil {
+		return w
+	}
+
+	var limit uint
+	switch {
+	case terminalSize.Width >= 120:
+		limit = 120
+	case terminalSize.Width >= 100:
+		limit = 100
+	case terminalSize.Width >= 80:
+		limit = 80
+	}
+
+	return NewWordWrapWriter(w, limit)
+}
+
+// NewWordWrapWriter is a Writer that supports a limit of characters on every line
+// and does auto word wrapping that respects that limit.
+func NewWordWrapWriter(w io.Writer, limit uint) io.Writer {
+	return &wordWrapWriter{
+		limit:  limit,
+		writer: w,
+	}
+}
+
+func (w wordWrapWriter) Write(p []byte) (nn int, err error) {
+	if w.limit == 0 {
+		return w.writer.Write(p)
+	}
+	original := string(p)
+	wrapped := wordwrap.WrapString(original, w.limit)
+	return w.writer.Write([]byte(wrapped))
+}
+
+// NewPunchCardWriter is a NewWordWrapWriter that limits the line width to 80 columns.
+func NewPunchCardWriter(w io.Writer) io.Writer {
+	return NewWordWrapWriter(w, 80)
+}
+
+type maxWidthWriter struct {
+	maxWidth     uint
+	currentWidth uint
+	written      uint
+	writer       io.Writer
+}
+
+// NewMaxWidthWriter is a Writer that supports a limit of characters on every
+// line, but doesn't do any word wrapping automatically.
+func NewMaxWidthWriter(w io.Writer, maxWidth uint) io.Writer {
+	return &maxWidthWriter{
+		maxWidth: maxWidth,
+		writer:   w,
+	}
+}
+
+func (m maxWidthWriter) Write(p []byte) (nn int, err error) {
+	for _, b := range p {
+		if m.currentWidth == m.maxWidth {
+			m.writer.Write([]byte{'\n'})
+			m.currentWidth = 0
+		}
+		if b == '\n' {
+			m.currentWidth = 0
+		}
+		_, err := m.writer.Write([]byte{b})
+		if err != nil {
+			return int(m.written), err
+		}
+		m.written++
+		m.currentWidth++
+	}
+	return len(p), nil
+}
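Again as an illustrative sketch outside the patch, the two writer constructors defined above; the sample strings and the 40-column limit are arbitrary:

```go
package main

import (
	"fmt"
	"os"

	"k8s.io/kubectl/pkg/util/term"
)

func main() {
	// Wrap stdout so long help text is word-wrapped at the detected terminal
	// width, or passed through unchanged when stdout is not a terminal.
	out := term.NewResponsiveWriter(os.Stdout)
	fmt.Fprintln(out, "A long description that is wrapped to the terminal width when printed interactively.")

	// A fixed 40-column writer, independent of the terminal size.
	narrow := term.NewWordWrapWriter(os.Stdout, 40)
	fmt.Fprintln(narrow, "This text wraps at forty columns regardless of the actual terminal size.")
}
```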
diff --git a/vendor/k8s.io/kubectl/pkg/validation/schema.go b/vendor/k8s.io/kubectl/pkg/validation/schema.go
new file mode 100644
index 00000000..6eef6193
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/validation/schema.go
@@ -0,0 +1,103 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+
+	ejson "github.com/exponent-io/jsonpath"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+)
+
+// Schema is an interface that knows how to validate an API object serialized to a byte array.
+type Schema interface {
+	ValidateBytes(data []byte) error
+}
+
+// NullSchema always validates bytes.
+type NullSchema struct{}
+
+// ValidateBytes never fails for NullSchema.
+func (NullSchema) ValidateBytes(data []byte) error { return nil }
+
+// NoDoubleKeySchema is a schema that disallows double keys.
+type NoDoubleKeySchema struct{}
+
+// ValidateBytes checks that metadata labels and annotations contain no duplicate keys.
+func (NoDoubleKeySchema) ValidateBytes(data []byte) error {
+	var list []error
+	if err := validateNoDuplicateKeys(data, "metadata", "labels"); err != nil {
+		list = append(list, err)
+	}
+	if err := validateNoDuplicateKeys(data, "metadata", "annotations"); err != nil {
+		list = append(list, err)
+	}
+	return utilerrors.NewAggregate(list)
+}
+
+func validateNoDuplicateKeys(data []byte, path ...string) error {
+	r := ejson.NewDecoder(bytes.NewReader(data))
+	// This is Go being unfriendly. The 'path ...string' comes in as a
+	// []string, and SeekTo takes ...interface{}, so we can't just pass
+	// the path straight in, we have to copy it.  *sigh*
+	ifacePath := []interface{}{}
+	for ix := range path {
+		ifacePath = append(ifacePath, path[ix])
+	}
+	found, err := r.SeekTo(ifacePath...)
+	if err != nil {
+		return err
+	}
+	if !found {
+		return nil
+	}
+	seen := map[string]bool{}
+	for {
+		tok, err := r.Token()
+		if err != nil {
+			return err
+		}
+		switch t := tok.(type) {
+		case json.Delim:
+			if t.String() == "}" {
+				return nil
+			}
+		case ejson.KeyString:
+			if seen[string(t)] {
+				return fmt.Errorf("duplicate key: %s", string(t))
+			}
+			seen[string(t)] = true
+		}
+	}
+}
+
+// ConjunctiveSchema encapsulates a schema list.
+type ConjunctiveSchema []Schema
+
+// ValidateBytes validates bytes per a ConjunctiveSchema.
+func (c ConjunctiveSchema) ValidateBytes(data []byte) error {
+	var list []error
+	schemas := []Schema(c)
+	for ix := range schemas {
+		if err := schemas[ix].ValidateBytes(data); err != nil {
+			list = append(list, err)
+		}
+	}
+	return utilerrors.NewAggregate(list)
+}
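A small usage sketch (not in the vendored file) combining the schemas above; the inline manifest with a duplicate "app" label key is fabricated for the example:

```go
package main

import (
	"fmt"

	"k8s.io/kubectl/pkg/validation"
)

func main() {
	manifest := []byte(`{"metadata":{"labels":{"app":"web","app":"api"}}}`)

	// Each schema in the conjunction gets a chance to reject the document.
	schema := validation.ConjunctiveSchema{
		validation.NoDoubleKeySchema{},
		validation.NullSchema{},
	}
	if err := schema.ValidateBytes(manifest); err != nil {
		fmt.Println("validation failed:", err) // duplicate key: app
	}
}
```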
diff --git a/vendor/k8s.io/utils/exec/doc.go b/vendor/k8s.io/utils/exec/doc.go
new file mode 100644
index 00000000..cbb44bdb
--- /dev/null
+++ b/vendor/k8s.io/utils/exec/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package exec provides an injectable interface and implementations for running commands.
+package exec // import "k8s.io/utils/exec"
diff --git a/vendor/k8s.io/utils/exec/exec.go b/vendor/k8s.io/utils/exec/exec.go
new file mode 100644
index 00000000..96bec01c
--- /dev/null
+++ b/vendor/k8s.io/utils/exec/exec.go
@@ -0,0 +1,252 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package exec
+
+import (
+	"context"
+	"io"
+	osexec "os/exec"
+	"syscall"
+	"time"
+)
+
+// ErrExecutableNotFound is returned if the executable is not found.
+var ErrExecutableNotFound = osexec.ErrNotFound
+
+// Interface is an interface that presents a subset of the os/exec API. Use this
+// when you want to inject fakeable/mockable exec behavior.
+type Interface interface {
+	// Command returns a Cmd instance which can be used to run a single command.
+	// This follows the pattern of package os/exec.
+	Command(cmd string, args ...string) Cmd
+
+	// CommandContext returns a Cmd instance which can be used to run a single command.
+	//
+	// The provided context is used to kill the process if the context becomes done
+	// before the command completes on its own. For example, a timeout can be set in
+	// the context.
+	CommandContext(ctx context.Context, cmd string, args ...string) Cmd
+
+	// LookPath wraps os/exec.LookPath
+	LookPath(file string) (string, error)
+}
+
+// Cmd is an interface that presents an API that is very similar to Cmd from os/exec.
+// As more functionality is needed, this can grow. Since Cmd is a struct, we will have
+// to replace fields with get/set method pairs.
+type Cmd interface {
+	// Run runs the command to completion.
+	Run() error
+	// CombinedOutput runs the command and returns its combined standard output
+	// and standard error. This follows the pattern of package os/exec.
+	CombinedOutput() ([]byte, error)
+	// Output runs the command and returns standard output, but not standard error.
+	Output() ([]byte, error)
+	SetDir(dir string)
+	SetStdin(in io.Reader)
+	SetStdout(out io.Writer)
+	SetStderr(out io.Writer)
+	SetEnv(env []string)
+
+	// StdoutPipe and StderrPipe for getting the process' Stdout and Stderr as
+	// Readers
+	StdoutPipe() (io.ReadCloser, error)
+	StderrPipe() (io.ReadCloser, error)
+
+	// Start and Wait are for running a process non-blocking
+	Start() error
+	Wait() error
+
+	// Stops the command by sending SIGTERM. It is not guaranteed the
+	// process will stop before this function returns. If the process is not
+	// responding, an internal timer function will send a SIGKILL to force
+	// terminate after 10 seconds.
+	Stop()
+}
+
+// ExitError is an interface that presents an API similar to os.ProcessState, which is
+// what ExitError from os/exec is. This is designed to make testing a bit easier and
+// probably loses some of the cross-platform properties of the underlying library.
+type ExitError interface {
+	String() string
+	Error() string
+	Exited() bool
+	ExitStatus() int
+}
+
+// Implements Interface in terms of really exec()ing.
+type executor struct{}
+
+// New returns a new Interface which will use os/exec to run commands.
+func New() Interface {
+	return &executor{}
+}
+
+// Command is part of the Interface interface.
+func (executor *executor) Command(cmd string, args ...string) Cmd {
+	return (*cmdWrapper)(osexec.Command(cmd, args...))
+}
+
+// CommandContext is part of the Interface interface.
+func (executor *executor) CommandContext(ctx context.Context, cmd string, args ...string) Cmd {
+	return (*cmdWrapper)(osexec.CommandContext(ctx, cmd, args...))
+}
+
+// LookPath is part of the Interface interface
+func (executor *executor) LookPath(file string) (string, error) {
+	return osexec.LookPath(file)
+}
+
+// Wraps exec.Cmd so we can capture errors.
+type cmdWrapper osexec.Cmd
+
+var _ Cmd = &cmdWrapper{}
+
+func (cmd *cmdWrapper) SetDir(dir string) {
+	cmd.Dir = dir
+}
+
+func (cmd *cmdWrapper) SetStdin(in io.Reader) {
+	cmd.Stdin = in
+}
+
+func (cmd *cmdWrapper) SetStdout(out io.Writer) {
+	cmd.Stdout = out
+}
+
+func (cmd *cmdWrapper) SetStderr(out io.Writer) {
+	cmd.Stderr = out
+}
+
+func (cmd *cmdWrapper) SetEnv(env []string) {
+	cmd.Env = env
+}
+
+func (cmd *cmdWrapper) StdoutPipe() (io.ReadCloser, error) {
+	r, err := (*osexec.Cmd)(cmd).StdoutPipe()
+	return r, handleError(err)
+}
+
+func (cmd *cmdWrapper) StderrPipe() (io.ReadCloser, error) {
+	r, err := (*osexec.Cmd)(cmd).StderrPipe()
+	return r, handleError(err)
+}
+
+func (cmd *cmdWrapper) Start() error {
+	err := (*osexec.Cmd)(cmd).Start()
+	return handleError(err)
+}
+
+func (cmd *cmdWrapper) Wait() error {
+	err := (*osexec.Cmd)(cmd).Wait()
+	return handleError(err)
+}
+
+// Run is part of the Cmd interface.
+func (cmd *cmdWrapper) Run() error {
+	err := (*osexec.Cmd)(cmd).Run()
+	return handleError(err)
+}
+
+// CombinedOutput is part of the Cmd interface.
+func (cmd *cmdWrapper) CombinedOutput() ([]byte, error) {
+	out, err := (*osexec.Cmd)(cmd).CombinedOutput()
+	return out, handleError(err)
+}
+
+func (cmd *cmdWrapper) Output() ([]byte, error) {
+	out, err := (*osexec.Cmd)(cmd).Output()
+	return out, handleError(err)
+}
+
+// Stop is part of the Cmd interface.
+func (cmd *cmdWrapper) Stop() {
+	c := (*osexec.Cmd)(cmd)
+
+	if c.Process == nil {
+		return
+	}
+
+	c.Process.Signal(syscall.SIGTERM)
+
+	time.AfterFunc(10*time.Second, func() {
+		if !c.ProcessState.Exited() {
+			c.Process.Signal(syscall.SIGKILL)
+		}
+	})
+}
+
+func handleError(err error) error {
+	if err == nil {
+		return nil
+	}
+
+	switch e := err.(type) {
+	case *osexec.ExitError:
+		return &ExitErrorWrapper{e}
+	case *osexec.Error:
+		if e.Err == osexec.ErrNotFound {
+			return ErrExecutableNotFound
+		}
+	}
+
+	return err
+}
+
+// ExitErrorWrapper is an implementation of ExitError in terms of os/exec ExitError.
+// Note: standard exec.ExitError is type *os.ProcessState, which already implements Exited().
+type ExitErrorWrapper struct {
+	*osexec.ExitError
+}
+
+var _ ExitError = &ExitErrorWrapper{}
+
+// ExitStatus is part of the ExitError interface.
+func (eew ExitErrorWrapper) ExitStatus() int {
+	ws, ok := eew.Sys().(syscall.WaitStatus)
+	if !ok {
+		panic("can't call ExitStatus() on a non-WaitStatus exitErrorWrapper")
+	}
+	return ws.ExitStatus()
+}
+
+// CodeExitError is an implementation of ExitError consisting of an error object
+// and an exit code (the upper bits of os.exec.ExitStatus).
+type CodeExitError struct {
+	Err  error
+	Code int
+}
+
+var _ ExitError = CodeExitError{}
+
+func (e CodeExitError) Error() string {
+	return e.Err.Error()
+}
+
+func (e CodeExitError) String() string {
+	return e.Err.Error()
+}
+
+// Exited reports whether the process has finished.
+func (e CodeExitError) Exited() bool {
+	return true
+}
+
+// ExitStatus returns the exit code.
+func (e CodeExitError) ExitStatus() int {
+	return e.Code
+}
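Finally, a hedged sketch (not part of the patch) of driving the injectable exec Interface; it assumes POSIX-style echo and sleep binaries are on PATH:

```go
package main

import (
	"context"
	"fmt"
	"os"
	"time"

	"k8s.io/utils/exec"
)

func main() {
	runner := exec.New()

	// Run a command and capture stdout and stderr together.
	out, err := runner.Command("echo", "hello").CombinedOutput()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Print(string(out))

	// The context variant kills the process once the deadline passes.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	if err := runner.CommandContext(ctx, "sleep", "10").Run(); err != nil {
		fmt.Fprintln(os.Stderr, "command stopped:", err)
	}
}
```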
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 65d63bc8..7bce1385 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1,3 +1,8 @@
+# github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78
+github.com/Azure/go-ansiterm
+github.com/Azure/go-ansiterm/winterm
+# github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd
+github.com/MakeNowJust/heredoc
 # github.com/Masterminds/goutils v1.1.0
 github.com/Masterminds/goutils
 # github.com/Masterminds/semver/v3 v3.0.1
@@ -6,6 +11,10 @@ github.com/Masterminds/semver/v3
 github.com/Masterminds/sprig/v3
 # github.com/Microsoft/go-winio v0.4.12
 github.com/Microsoft/go-winio
+# github.com/PuerkitoBio/purell v1.1.1
+github.com/PuerkitoBio/purell
+# github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578
+github.com/PuerkitoBio/urlesc
 # github.com/beorn7/perks v1.0.1
 github.com/beorn7/perks/quantile
 # github.com/blang/semver v3.5.1+incompatible
@@ -48,12 +57,24 @@ github.com/docker/docker/api/types/volume
 github.com/docker/docker/client
 github.com/docker/docker/errdefs
 github.com/docker/docker/pkg/stdcopy
+github.com/docker/docker/pkg/term
+github.com/docker/docker/pkg/term/windows
 # github.com/docker/go-connections v0.4.0
 github.com/docker/go-connections/nat
 github.com/docker/go-connections/sockets
 github.com/docker/go-connections/tlsconfig
 # github.com/docker/go-units v0.4.0
 github.com/docker/go-units
+# github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96
+github.com/docker/spdystream
+github.com/docker/spdystream/spdy
+# github.com/emicklei/go-restful v2.9.5+incompatible
+github.com/emicklei/go-restful
+github.com/emicklei/go-restful/log
+# github.com/evanphx/json-patch v4.5.0+incompatible
+github.com/evanphx/json-patch
+# github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d
+github.com/exponent-io/jsonpath
 # github.com/ghodss/yaml v1.0.0
 github.com/ghodss/yaml
 # github.com/go-bindata/go-bindata v3.1.2+incompatible
@@ -61,6 +82,14 @@ github.com/go-bindata/go-bindata
 github.com/go-bindata/go-bindata/go-bindata
 # github.com/go-ini/ini v1.37.0
 github.com/go-ini/ini
+# github.com/go-openapi/jsonpointer v0.19.3
+github.com/go-openapi/jsonpointer
+# github.com/go-openapi/jsonreference v0.19.3
+github.com/go-openapi/jsonreference
+# github.com/go-openapi/spec v0.19.3
+github.com/go-openapi/spec
+# github.com/go-openapi/swag v0.19.5
+github.com/go-openapi/swag
 # github.com/gogo/protobuf v1.3.1
 github.com/gogo/protobuf/proto
 github.com/gogo/protobuf/sortkeys
@@ -70,13 +99,15 @@ github.com/golang/protobuf/ptypes
 github.com/golang/protobuf/ptypes/any
 github.com/golang/protobuf/ptypes/duration
 github.com/golang/protobuf/ptypes/timestamp
+# github.com/google/btree v1.0.0
+github.com/google/btree
 # github.com/google/go-cmp v0.4.0
 github.com/google/go-cmp/cmp
 github.com/google/go-cmp/cmp/internal/diff
 github.com/google/go-cmp/cmp/internal/flags
 github.com/google/go-cmp/cmp/internal/function
 github.com/google/go-cmp/cmp/internal/value
-# github.com/google/gofuzz v1.0.0
+# github.com/google/gofuzz v1.1.0
 github.com/google/gofuzz
 # github.com/google/uuid v1.1.1
 github.com/google/uuid
@@ -84,6 +115,9 @@ github.com/google/uuid
 github.com/googleapis/gnostic/OpenAPIv2
 github.com/googleapis/gnostic/compiler
 github.com/googleapis/gnostic/extensions
+# github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc
+github.com/gregjones/httpcache
+github.com/gregjones/httpcache/diskcache
 # github.com/hashicorp/golang-lru v0.5.3
 github.com/hashicorp/golang-lru
 github.com/hashicorp/golang-lru/simplelru
@@ -91,10 +125,18 @@ github.com/hashicorp/golang-lru/simplelru
 github.com/huandu/xstrings
 # github.com/imdario/mergo v0.3.7
 github.com/imdario/mergo
+# github.com/inconshreveable/mousetrap v1.0.0
+github.com/inconshreveable/mousetrap
 # github.com/json-iterator/go v1.1.9
 github.com/json-iterator/go
 # github.com/konsorten/go-windows-terminal-sequences v1.0.2
 github.com/konsorten/go-windows-terminal-sequences
+# github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de
+github.com/liggitt/tabwriter
+# github.com/mailru/easyjson v0.7.0
+github.com/mailru/easyjson/buffer
+github.com/mailru/easyjson/jlexer
+github.com/mailru/easyjson/jwriter
 # github.com/mattn/go-colorable v0.1.2
 github.com/mattn/go-colorable
 # github.com/mattn/go-isatty v0.0.8
@@ -105,6 +147,8 @@ github.com/matttproud/golang_protobuf_extensions/pbutil
 github.com/mcuadros/go-version
 # github.com/mitchellh/copystructure v1.0.0
 github.com/mitchellh/copystructure
+# github.com/mitchellh/go-wordwrap v1.0.0
+github.com/mitchellh/go-wordwrap
 # github.com/mitchellh/reflectwalk v1.0.0
 github.com/mitchellh/reflectwalk
 # github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd
@@ -116,6 +160,8 @@ github.com/opencontainers/go-digest
 # github.com/opencontainers/image-spec v1.0.1
 github.com/opencontainers/image-spec/specs-go
 github.com/opencontainers/image-spec/specs-go/v1
+# github.com/peterbourgon/diskv v2.0.1+incompatible
+github.com/peterbourgon/diskv
 # github.com/pkg/errors v0.8.1
 github.com/pkg/errors
 # github.com/pmezard/go-difflib v1.0.0
@@ -133,7 +179,7 @@ github.com/prometheus/common/model
 github.com/prometheus/procfs
 github.com/prometheus/procfs/internal/fs
 github.com/prometheus/procfs/internal/util
-# github.com/rancher/norman v0.0.0-20200312033725-5c74e1ee1e6d
+# github.com/rancher/norman v0.0.0-20200326201949-eb806263e8ad
 github.com/rancher/norman/condition
 github.com/rancher/norman/controller
 github.com/rancher/norman/httperror
@@ -148,26 +194,31 @@ github.com/rancher/norman/types/convert
 github.com/rancher/norman/types/definition
 github.com/rancher/norman/types/slice
 github.com/rancher/norman/types/values
-# github.com/rancher/types v0.0.0-20200326224903-b4612bd96d9b
+# github.com/rancher/types v0.0.0-20200326224235-0d1e1dcc8d55
 github.com/rancher/types/apis/management.cattle.io/v3
 github.com/rancher/types/apis/project.cattle.io/v3
 github.com/rancher/types/condition
 github.com/rancher/types/image
 github.com/rancher/types/kdm
-# github.com/rancher/wrangler v0.5.0
+# github.com/rancher/wrangler v0.5.4-0.20200326191509-4054411d9736
 github.com/rancher/wrangler/pkg/name
 github.com/rancher/wrangler/pkg/ratelimit
+# github.com/russross/blackfriday v1.5.2
+github.com/russross/blackfriday
 # github.com/sirupsen/logrus v1.4.2
 github.com/sirupsen/logrus
 # github.com/spf13/cast v1.3.0
 github.com/spf13/cast
+# github.com/spf13/cobra v0.0.5
+github.com/spf13/cobra
 # github.com/spf13/pflag v1.0.5
 github.com/spf13/pflag
 # github.com/stretchr/testify v1.4.0
 github.com/stretchr/testify/assert
 # github.com/urfave/cli v1.20.0
 github.com/urfave/cli
-# golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708
+# golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975
+golang.org/x/crypto/blowfish
 golang.org/x/crypto/chacha20
 golang.org/x/crypto/curve25519
 golang.org/x/crypto/ed25519
@@ -178,6 +229,7 @@ golang.org/x/crypto/poly1305
 golang.org/x/crypto/scrypt
 golang.org/x/crypto/ssh
 golang.org/x/crypto/ssh/agent
+golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
 golang.org/x/crypto/ssh/terminal
 # golang.org/x/net v0.0.0-20191112182307-2180aed22343
 golang.org/x/net/context
@@ -198,10 +250,17 @@ golang.org/x/sys/cpu
 golang.org/x/sys/unix
 golang.org/x/sys/windows
 # golang.org/x/text v0.3.2
+golang.org/x/text/encoding
+golang.org/x/text/encoding/internal
+golang.org/x/text/encoding/internal/identifier
+golang.org/x/text/encoding/unicode
+golang.org/x/text/internal/utf8internal
+golang.org/x/text/runes
 golang.org/x/text/secure/bidirule
 golang.org/x/text/transform
 golang.org/x/text/unicode/bidi
 golang.org/x/text/unicode/norm
+golang.org/x/text/width
 # golang.org/x/time v0.0.0-20191024005414-555d28b269f0
 golang.org/x/time/rate
 # google.golang.org/appengine v1.6.5
@@ -214,7 +273,7 @@ google.golang.org/appengine/internal/urlfetch
 google.golang.org/appengine/urlfetch
 # google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9
 google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.25.1
+# google.golang.org/grpc v1.26.0
 google.golang.org/grpc/codes
 google.golang.org/grpc/connectivity
 google.golang.org/grpc/grpclog
@@ -222,9 +281,11 @@ google.golang.org/grpc/internal
 google.golang.org/grpc/status
 # gopkg.in/inf.v0 v0.9.1
 gopkg.in/inf.v0
-# gopkg.in/yaml.v2 v2.2.5
+# gopkg.in/yaml.v2 v2.2.8
 gopkg.in/yaml.v2
-# k8s.io/api v0.17.2
+# k8s.io/api v0.18.0
+k8s.io/api/admission/v1
+k8s.io/api/admission/v1beta1
 k8s.io/api/admissionregistration/v1
 k8s.io/api/admissionregistration/v1beta1
 k8s.io/api/apps/v1
@@ -250,6 +311,7 @@ k8s.io/api/discovery/v1beta1
 k8s.io/api/events/v1beta1
 k8s.io/api/extensions/v1beta1
 k8s.io/api/flowcontrol/v1alpha1
+k8s.io/api/imagepolicy/v1alpha1
 k8s.io/api/networking/v1
 k8s.io/api/networking/v1beta1
 k8s.io/api/node/v1alpha1
@@ -265,13 +327,17 @@ k8s.io/api/settings/v1alpha1
 k8s.io/api/storage/v1
 k8s.io/api/storage/v1alpha1
 k8s.io/api/storage/v1beta1
-# k8s.io/apimachinery v0.17.2
+# k8s.io/apimachinery v0.18.0
+k8s.io/apimachinery/pkg/api/equality
 k8s.io/apimachinery/pkg/api/errors
 k8s.io/apimachinery/pkg/api/meta
 k8s.io/apimachinery/pkg/api/resource
+k8s.io/apimachinery/pkg/api/validation
 k8s.io/apimachinery/pkg/apis/meta/internalversion
 k8s.io/apimachinery/pkg/apis/meta/v1
 k8s.io/apimachinery/pkg/apis/meta/v1/unstructured
+k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructuredscheme
+k8s.io/apimachinery/pkg/apis/meta/v1/validation
 k8s.io/apimachinery/pkg/apis/meta/v1beta1
 k8s.io/apimachinery/pkg/conversion
 k8s.io/apimachinery/pkg/conversion/queryparams
@@ -290,14 +356,18 @@ k8s.io/apimachinery/pkg/types
 k8s.io/apimachinery/pkg/util/cache
 k8s.io/apimachinery/pkg/util/clock
 k8s.io/apimachinery/pkg/util/diff
+k8s.io/apimachinery/pkg/util/duration
 k8s.io/apimachinery/pkg/util/errors
 k8s.io/apimachinery/pkg/util/framer
+k8s.io/apimachinery/pkg/util/httpstream
+k8s.io/apimachinery/pkg/util/httpstream/spdy
 k8s.io/apimachinery/pkg/util/intstr
 k8s.io/apimachinery/pkg/util/json
 k8s.io/apimachinery/pkg/util/mergepatch
 k8s.io/apimachinery/pkg/util/naming
 k8s.io/apimachinery/pkg/util/net
 k8s.io/apimachinery/pkg/util/rand
+k8s.io/apimachinery/pkg/util/remotecommand
 k8s.io/apimachinery/pkg/util/runtime
 k8s.io/apimachinery/pkg/util/sets
 k8s.io/apimachinery/pkg/util/strategicpatch
@@ -308,16 +378,32 @@ k8s.io/apimachinery/pkg/util/yaml
 k8s.io/apimachinery/pkg/version
 k8s.io/apimachinery/pkg/watch
 k8s.io/apimachinery/third_party/forked/golang/json
+k8s.io/apimachinery/third_party/forked/golang/netutil
 k8s.io/apimachinery/third_party/forked/golang/reflect
-# k8s.io/apiserver v0.17.2
+# k8s.io/apiserver v0.18.0
 k8s.io/apiserver/pkg/apis/apiserver
 k8s.io/apiserver/pkg/apis/apiserver/v1alpha1
 k8s.io/apiserver/pkg/apis/audit
 k8s.io/apiserver/pkg/apis/audit/v1
 k8s.io/apiserver/pkg/apis/config
 k8s.io/apiserver/pkg/apis/config/v1
-# k8s.io/client-go v12.0.0+incompatible => k8s.io/client-go v0.17.2
+# k8s.io/cli-runtime v0.18.0
+k8s.io/cli-runtime/pkg/genericclioptions
+k8s.io/cli-runtime/pkg/kustomize
+k8s.io/cli-runtime/pkg/kustomize/k8sdeps
+k8s.io/cli-runtime/pkg/kustomize/k8sdeps/configmapandsecret
+k8s.io/cli-runtime/pkg/kustomize/k8sdeps/kunstruct
+k8s.io/cli-runtime/pkg/kustomize/k8sdeps/kv
+k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer
+k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/hash
+k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/patch
+k8s.io/cli-runtime/pkg/kustomize/k8sdeps/validator
+k8s.io/cli-runtime/pkg/printers
+k8s.io/cli-runtime/pkg/resource
+# k8s.io/client-go v12.0.0+incompatible => k8s.io/client-go v0.18.0
 k8s.io/client-go/discovery
+k8s.io/client-go/discovery/cached/disk
+k8s.io/client-go/dynamic
 k8s.io/client-go/kubernetes
 k8s.io/client-go/kubernetes/scheme
 k8s.io/client-go/kubernetes/typed/admissionregistration/v1
@@ -367,6 +453,16 @@ k8s.io/client-go/pkg/version
 k8s.io/client-go/plugin/pkg/client/auth/exec
 k8s.io/client-go/rest
 k8s.io/client-go/rest/watch
+k8s.io/client-go/restmapper
+k8s.io/client-go/scale
+k8s.io/client-go/scale/scheme
+k8s.io/client-go/scale/scheme/appsint
+k8s.io/client-go/scale/scheme/appsv1beta1
+k8s.io/client-go/scale/scheme/appsv1beta2
+k8s.io/client-go/scale/scheme/autoscalingv1
+k8s.io/client-go/scale/scheme/extensionsint
+k8s.io/client-go/scale/scheme/extensionsv1beta1
+k8s.io/client-go/third_party/forked/golang/template
 k8s.io/client-go/tools/auth
 k8s.io/client-go/tools/cache
 k8s.io/client-go/tools/clientcmd
@@ -376,23 +472,64 @@ k8s.io/client-go/tools/clientcmd/api/v1
 k8s.io/client-go/tools/metrics
 k8s.io/client-go/tools/pager
 k8s.io/client-go/tools/reference
+k8s.io/client-go/tools/remotecommand
 k8s.io/client-go/transport
+k8s.io/client-go/transport/spdy
 k8s.io/client-go/util/cert
 k8s.io/client-go/util/connrotation
+k8s.io/client-go/util/exec
 k8s.io/client-go/util/flowcontrol
 k8s.io/client-go/util/homedir
+k8s.io/client-go/util/jsonpath
 k8s.io/client-go/util/keyutil
-k8s.io/client-go/util/retry
 k8s.io/client-go/util/workqueue
+# k8s.io/component-base v0.18.0
+k8s.io/component-base/version
 # k8s.io/klog v1.0.0
 k8s.io/klog
-# k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a
+# k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c
+k8s.io/kube-openapi/pkg/common
 k8s.io/kube-openapi/pkg/util/proto
-# k8s.io/kubectl v0.17.2
+k8s.io/kube-openapi/pkg/util/proto/validation
+# k8s.io/kubectl v0.18.0
+k8s.io/kubectl/pkg/cmd/util
 k8s.io/kubectl/pkg/drain
-# k8s.io/utils v0.0.0-20191114200735-6ca3b61696b6
+k8s.io/kubectl/pkg/scheme
+k8s.io/kubectl/pkg/util/interrupt
+k8s.io/kubectl/pkg/util/openapi
+k8s.io/kubectl/pkg/util/openapi/validation
+k8s.io/kubectl/pkg/util/templates
+k8s.io/kubectl/pkg/util/term
+k8s.io/kubectl/pkg/validation
+# k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89
 k8s.io/utils/buffer
+k8s.io/utils/exec
 k8s.io/utils/integer
 k8s.io/utils/trace
-# sigs.k8s.io/yaml v1.1.0
+# sigs.k8s.io/kustomize v2.0.3+incompatible
+sigs.k8s.io/kustomize/pkg/commands/build
+sigs.k8s.io/kustomize/pkg/constants
+sigs.k8s.io/kustomize/pkg/expansion
+sigs.k8s.io/kustomize/pkg/factory
+sigs.k8s.io/kustomize/pkg/fs
+sigs.k8s.io/kustomize/pkg/git
+sigs.k8s.io/kustomize/pkg/gvk
+sigs.k8s.io/kustomize/pkg/ifc
+sigs.k8s.io/kustomize/pkg/ifc/transformer
+sigs.k8s.io/kustomize/pkg/image
+sigs.k8s.io/kustomize/pkg/internal/error
+sigs.k8s.io/kustomize/pkg/loader
+sigs.k8s.io/kustomize/pkg/patch
+sigs.k8s.io/kustomize/pkg/patch/transformer
+sigs.k8s.io/kustomize/pkg/resid
+sigs.k8s.io/kustomize/pkg/resmap
+sigs.k8s.io/kustomize/pkg/resource
+sigs.k8s.io/kustomize/pkg/target
+sigs.k8s.io/kustomize/pkg/transformers
+sigs.k8s.io/kustomize/pkg/transformers/config
+sigs.k8s.io/kustomize/pkg/transformers/config/defaultconfig
+sigs.k8s.io/kustomize/pkg/types
+# sigs.k8s.io/structured-merge-diff/v3 v3.0.0
+sigs.k8s.io/structured-merge-diff/v3/value
+# sigs.k8s.io/yaml v1.2.0
 sigs.k8s.io/yaml
diff --git a/vendor/sigs.k8s.io/kustomize/LICENSE b/vendor/sigs.k8s.io/kustomize/LICENSE
new file mode 100644
index 00000000..8dada3ed
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/commands/build/build.go b/vendor/sigs.k8s.io/kustomize/pkg/commands/build/build.go
new file mode 100644
index 00000000..e62747e3
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/commands/build/build.go
@@ -0,0 +1,129 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package build
+
+import (
+	"io"
+
+	"github.com/pkg/errors"
+	"github.com/spf13/cobra"
+	"sigs.k8s.io/kustomize/pkg/constants"
+	"sigs.k8s.io/kustomize/pkg/fs"
+	"sigs.k8s.io/kustomize/pkg/ifc/transformer"
+	"sigs.k8s.io/kustomize/pkg/loader"
+	"sigs.k8s.io/kustomize/pkg/resmap"
+	"sigs.k8s.io/kustomize/pkg/target"
+)
+
+// Options contain the options for running a build
+type Options struct {
+	kustomizationPath string
+	outputPath        string
+}
+
+// NewOptions creates a Options object
+func NewOptions(p, o string) *Options {
+	return &Options{
+		kustomizationPath: p,
+		outputPath:        o,
+	}
+}
+
+var examples = `
+Use the file somedir/kustomization.yaml to generate a set of api resources:
+    build somedir
+
+Use a url pointing to a remote directory/kustomization.yaml to generate a set of api resources:
+    build url
+The url should follow hashicorp/go-getter URL format described in
+https://github.com/hashicorp/go-getter#url-format
+
+url examples:
+  sigs.k8s.io/kustomize//examples/multibases?ref=v1.0.6
+  github.com/Liujingfang1/mysql
+  github.com/Liujingfang1/kustomize//examples/helloWorld?ref=repoUrl2
+`
+
+// NewCmdBuild creates a new build command.
+func NewCmdBuild(
+	out io.Writer, fs fs.FileSystem,
+	rf *resmap.Factory,
+	ptf transformer.Factory) *cobra.Command {
+	var o Options
+
+	cmd := &cobra.Command{
+		Use:          "build [path]",
+		Short:        "Print current configuration per contents of " + constants.KustomizationFileNames[0],
+		Example:      examples,
+		SilenceUsage: true,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			err := o.Validate(args)
+			if err != nil {
+				return err
+			}
+			return o.RunBuild(out, fs, rf, ptf)
+		},
+	}
+	cmd.Flags().StringVarP(
+		&o.outputPath,
+		"output", "o", "",
+		"If specified, write the build output to this path.")
+	return cmd
+}
+
+// Validate validates build command.
+func (o *Options) Validate(args []string) error {
+	if len(args) > 1 {
+		return errors.New("specify one path to " + constants.KustomizationFileNames[0])
+	}
+	if len(args) == 0 {
+		o.kustomizationPath = "./"
+	} else {
+		o.kustomizationPath = args[0]
+	}
+
+	return nil
+}
+
+// RunBuild runs build command.
+func (o *Options) RunBuild(
+	out io.Writer, fSys fs.FileSystem,
+	rf *resmap.Factory, ptf transformer.Factory) error {
+	ldr, err := loader.NewLoader(o.kustomizationPath, fSys)
+	if err != nil {
+		return err
+	}
+	defer ldr.Cleanup()
+	kt, err := target.NewKustTarget(ldr, rf, ptf)
+	if err != nil {
+		return err
+	}
+	allResources, err := kt.MakeCustomizedResMap()
+	if err != nil {
+		return err
+	}
+	// Output the objects.
+	res, err := allResources.EncodeAsYaml()
+	if err != nil {
+		return err
+	}
+	if o.outputPath != "" {
+		return fSys.WriteFile(o.outputPath, res)
+	}
+	_, err = out.Write(res)
+	return err
+}
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/constants/constants.go b/vendor/sigs.k8s.io/kustomize/pkg/constants/constants.go
new file mode 100644
index 00000000..dd50230f
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/constants/constants.go
@@ -0,0 +1,28 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package constants holds global constants for the kustomize tool.
+package constants
+
+// KustomizationFileNames is a list of filenames that can be recognized and consumed
+// by Kustomize.
+// In each directory, Kustomize searches for a file whose name is in this list.
+// Only one match is allowed.
+var KustomizationFileNames = []string{
+	"kustomization.yaml",
+	"kustomization.yml",
+	"Kustomization",
+}
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/expansion/expand.go b/vendor/sigs.k8s.io/kustomize/pkg/expansion/expand.go
new file mode 100644
index 00000000..de55e461
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/expansion/expand.go
@@ -0,0 +1,121 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package expansion provides functions to find and replace $(FOO)-style variables in strings.
+package expansion
+
+import (
+	"bytes"
+)
+
+const (
+	operator        = '$'
+	referenceOpener = '('
+	referenceCloser = ')'
+)
+
+// syntaxWrap returns the input string wrapped by the expansion syntax.
+func syntaxWrap(input string) string {
+	return string(operator) + string(referenceOpener) + input + string(referenceCloser)
+}
+
+// MappingFuncFor returns a mapping function for use with Expand that
+// implements the expansion semantics defined in the expansion spec; it
+// returns the input string wrapped in the expansion syntax if no mapping
+// for the input is found.
+func MappingFuncFor(
+	counts map[string]int,
+	context ...map[string]string) func(string) string {
+	return func(input string) string {
+		for _, vars := range context {
+			val, ok := vars[input]
+			if ok {
+				counts[input]++
+				return val
+			}
+		}
+		return syntaxWrap(input)
+	}
+}
+
+// Expand replaces variable references in the input string according to
+// the expansion spec using the given mapping function to resolve the
+// values of variables.
+func Expand(input string, mapping func(string) string) string {
+	var buf bytes.Buffer
+	checkpoint := 0
+	for cursor := 0; cursor < len(input); cursor++ {
+		if input[cursor] == operator && cursor+1 < len(input) {
+			// Copy the portion of the input string since the last
+			// checkpoint into the buffer
+			buf.WriteString(input[checkpoint:cursor])
+
+			// Attempt to read the variable name as defined by the
+			// syntax from the input string
+			read, isVar, advance := tryReadVariableName(input[cursor+1:])
+
+			if isVar {
+				// We were able to read a variable name correctly;
+				// apply the mapping to the variable name and copy the
+				// bytes into the buffer
+				buf.WriteString(mapping(read))
+			} else {
+				// Not a variable name; copy the read bytes into the buffer
+				buf.WriteString(read)
+			}
+
+			// Advance the cursor in the input string to account for
+			// bytes consumed to read the variable name expression
+			cursor += advance
+
+			// Advance the checkpoint in the input string
+			checkpoint = cursor + 1
+		}
+	}
+
+	// Return the buffer and any remaining unwritten bytes in the
+	// input string.
+	return buf.String() + input[checkpoint:]
+}
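+
+// A usage sketch (illustrative only, not part of the upstream API surface;
+// the variable names and values below are made up):
+//
+//	counts := map[string]int{}
+//	vars := map[string]string{"FOO": "bar"}
+//	mapping := MappingFuncFor(counts, vars)
+//	Expand("image: $(FOO):latest", mapping) // "image: bar:latest"
+//	Expand("cost: $$(FOO)", mapping)        // "cost: $(FOO)" ($$ escapes the operator)
+//	Expand("$(MISSING)", mapping)           // "$(MISSING)" (unknown refs stay wrapped)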
+
+// tryReadVariableName attempts to read a variable name from the input
+// string and returns the content read from the input, whether that content
+// represents a variable name to perform mapping on, and the number of bytes
+// consumed in the input string.
+//
+// The input string is assumed not to contain the initial operator.
+func tryReadVariableName(input string) (string, bool, int) {
+	switch input[0] {
+	case operator:
+		// Escaped operator; return it.
+		return input[0:1], false, 1
+	case referenceOpener:
+		// Scan to expression closer
+		for i := 1; i < len(input); i++ {
+			if input[i] == referenceCloser {
+				return input[1:i], true, i + 1
+			}
+		}
+
+		// Incomplete reference; return it.
+		return string(operator) + string(referenceOpener), false, 1
+	default:
+		// Not the beginning of an expression, ie, an operator
+		// that doesn't begin an expression.  Return the operator
+		// and the first rune in the string.
+		return string(operator) + string(input[0]), false, 1
+	}
+}
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/factory/factory.go b/vendor/sigs.k8s.io/kustomize/pkg/factory/factory.go
new file mode 100644
index 00000000..e71669a8
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/factory/factory.go
@@ -0,0 +1,39 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package factory provides factories for kustomize.
+package factory
+
+import (
+	"sigs.k8s.io/kustomize/pkg/ifc"
+	"sigs.k8s.io/kustomize/pkg/ifc/transformer"
+	"sigs.k8s.io/kustomize/pkg/resmap"
+	"sigs.k8s.io/kustomize/pkg/resource"
+)
+
+// KustFactory provides different factories for kustomize
+type KustFactory struct {
+	ResmapF      *resmap.Factory
+	TransformerF transformer.Factory
+	ValidatorF   ifc.Validator
+	UnstructF    ifc.KunstructuredFactory
+}
+
+// NewKustFactory creates a KustFactory instance
+func NewKustFactory(u ifc.KunstructuredFactory, v ifc.Validator, t transformer.Factory) *KustFactory {
+	return &KustFactory{
+		ResmapF:      resmap.NewFactory(resource.NewFactory(u)),
+		TransformerF: t,
+		ValidatorF:   v,
+		UnstructF:    u,
+	}
+}
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/fs/confirmeddir.go b/vendor/sigs.k8s.io/kustomize/pkg/fs/confirmeddir.go
new file mode 100644
index 00000000..5d12bf07
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/fs/confirmeddir.go
@@ -0,0 +1,93 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fs
+
+import (
+	"io/ioutil"
+	"path/filepath"
+	"strings"
+)
+
+// ConfirmedDir is a clean, absolute, delinkified path
+// that was confirmed to point to an existing directory.
+type ConfirmedDir string
+
+// NewTmpConfirmedDir returns a temporary dir, or an error.
+// The directory is cleaned and free of symlinks, so it is
+// returned as a ConfirmedDir.
+func NewTmpConfirmedDir() (ConfirmedDir, error) {
+	n, err := ioutil.TempDir("", "kustomize-")
+	if err != nil {
+		return "", err
+	}
+
+	// On macOS, `ioutil.TempDir` creates a directory
+	// rooted in the `/var` folder, which is in turn a symlink
+	// to `/private/var`.
+	// `filepath.EvalSymlinks` is used to
+	// resolve the real absolute path.
+	deLinked, err := filepath.EvalSymlinks(n)
+	return ConfirmedDir(deLinked), err
+
+}
+
+// HasPrefix returns true if the directory argument
+// is a prefix of self (d) from the point of view of
+// a file system.
+//
+// I.e., it's true if the argument equals or contains
+// self (d) in a file path sense.
+//
+// HasPrefix emulates the semantics of strings.HasPrefix
+// such that the following are true:
+//
+//   strings.HasPrefix("foobar", "foobar")
+//   strings.HasPrefix("foobar", "foo")
+//   strings.HasPrefix("foobar", "")
+//
+//   d := fSys.ConfirmDir("/foo/bar")
+//   d.HasPrefix("/foo/bar")
+//   d.HasPrefix("/foo")
+//   d.HasPrefix("/")
+//
+// Not contacting a file system here to check for
+// actual path existence.
+//
+// This is tested on Linux, but will have trouble
+// on other operating systems.
+// TODO(monopole) Refactor when #golang/go/18358 closes.
+// See also:
+//   https://github.com/golang/go/issues/18358
+//   https://github.com/golang/dep/issues/296
+//   https://github.com/golang/dep/blob/master/internal/fs/fs.go#L33
+//   https://codereview.appspot.com/5712045
+func (d ConfirmedDir) HasPrefix(path ConfirmedDir) bool {
+	if path.String() == string(filepath.Separator) || path == d {
+		return true
+	}
+	return strings.HasPrefix(
+		string(d),
+		string(path)+string(filepath.Separator))
+}
+
+func (d ConfirmedDir) Join(path string) string {
+	return filepath.Join(string(d), path)
+}
+
+func (d ConfirmedDir) String() string {
+	return string(d)
+}
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/fs/fakefile.go b/vendor/sigs.k8s.io/kustomize/pkg/fs/fakefile.go
new file mode 100644
index 00000000..64bc5568
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/fs/fakefile.go
@@ -0,0 +1,69 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fs
+
+import (
+	"bytes"
+	"os"
+)
+
+var _ File = &FakeFile{}
+
+// FakeFile implements File in-memory for tests.
+type FakeFile struct {
+	name    string
+	content []byte
+	dir     bool
+	open    bool
+}
+
+// makeDir makes a fake directory.
+func makeDir(name string) *FakeFile {
+	return &FakeFile{name: name, dir: true}
+}
+
+// Close marks the fake file closed.
+func (f *FakeFile) Close() error {
+	f.open = false
+	return nil
+}
+
+// Read never fails, and doesn't mutate p.
+func (f *FakeFile) Read(p []byte) (n int, err error) {
+	return len(p), nil
+}
+
+// Write saves the contents of the argument to memory.
+func (f *FakeFile) Write(p []byte) (n int, err error) {
+	f.content = p
+	return len(p), nil
+}
+
+// ContentMatches returns true if v matches fake file's content.
+func (f *FakeFile) ContentMatches(v []byte) bool {
+	return bytes.Equal(v, f.content)
+}
+
+// GetContent returns the content of the fake file.
+func (f *FakeFile) GetContent() []byte {
+	return f.content
+}
+
+// Stat returns nil.
+func (f *FakeFile) Stat() (os.FileInfo, error) {
+	return nil, nil
+}
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/fs/fakefileinfo.go b/vendor/sigs.k8s.io/kustomize/pkg/fs/fakefileinfo.go
new file mode 100644
index 00000000..6ccca915
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/fs/fakefileinfo.go
@@ -0,0 +1,47 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fs
+
+import (
+	"os"
+	"time"
+)
+
+var _ os.FileInfo = &Fakefileinfo{}
+
+// Fakefileinfo implements os.FileInfo for a fake in-memory file.
+type Fakefileinfo struct {
+	*FakeFile
+}
+
+// Name returns the name of the file
+func (fi *Fakefileinfo) Name() string { return fi.name }
+
+// Size returns the size of the file
+func (fi *Fakefileinfo) Size() int64 { return int64(len(fi.content)) }
+
+// Mode returns the file mode
+func (fi *Fakefileinfo) Mode() os.FileMode { return 0777 }
+
+// ModTime returns the modification time
+func (fi *Fakefileinfo) ModTime() time.Time { return time.Time{} }
+
+// IsDir reports whether the entry is a directory
+func (fi *Fakefileinfo) IsDir() bool { return fi.dir }
+
+// Sys should return the underlying data source, but it currently returns nil
+func (fi *Fakefileinfo) Sys() interface{} { return nil }
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/fs/fakefs.go b/vendor/sigs.k8s.io/kustomize/pkg/fs/fakefs.go
new file mode 100644
index 00000000..59c0966b
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/fs/fakefs.go
@@ -0,0 +1,185 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fs
+
+import (
+	"fmt"
+	"path/filepath"
+	"sort"
+	"strings"
+
+	"sigs.k8s.io/kustomize/pkg/constants"
+)
+
+var _ FileSystem = &fakeFs{}
+
+// fakeFs implements FileSystem using a fake in-memory filesystem.
+type fakeFs struct {
+	m map[string]*FakeFile
+}
+
+// MakeFakeFS returns an instance of fakeFs with no files in it.
+func MakeFakeFS() *fakeFs {
+	result := &fakeFs{m: map[string]*FakeFile{}}
+	result.Mkdir("/")
+	return result
+}
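+
+// A minimal usage sketch (illustrative only; the path and content are made up)
+// showing how tests can exercise the FileSystem interface without touching disk:
+//
+//	fSys := MakeFakeFS()
+//	fSys.WriteFile("/app/kustomization.yaml", []byte("namePrefix: dev-"))
+//	data, _ := fSys.ReadFile("/app/kustomization.yaml") // "namePrefix: dev-"
+//	fSys.IsDir("/app")                                  // true, inferred from the stored path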
+
+// kustomizationContent is used in tests.
+const kustomizationContent = `apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+namePrefix: some-prefix
+nameSuffix: some-suffix
+# Labels to add to all objects and selectors.
+# These labels would also be used to form the selector for apply --prune
+# Named differently than “labels” to avoid confusion with metadata for this object
+commonLabels:
+  app: helloworld
+commonAnnotations:
+  note: This is an example annotation
+resources: []
+#- service.yaml
+#- ../some-dir/
+# There could also be configmaps in Base, which would make these overlays
+configMapGenerator: []
+# There could be secrets in Base, if just using a fork/rebase workflow
+secretGenerator: []
+`
+
+// Create ensures a fake file appears in the in-memory file system.
+func (fs *fakeFs) Create(name string) (File, error) {
+	f := &FakeFile{}
+	f.open = true
+	fs.m[name] = f
+	return fs.m[name], nil
+}
+
+// Mkdir ensures a fake directory appears in the in-memory file system.
+func (fs *fakeFs) Mkdir(name string) error {
+	fs.m[name] = makeDir(name)
+	return nil
+}
+
+// MkdirAll delegates to Mkdir
+func (fs *fakeFs) MkdirAll(name string) error {
+	return fs.Mkdir(name)
+}
+
+// RemoveAll removes the named path and everything below it
+// from the fake file system; it never returns an error.
+func (fs *fakeFs) RemoveAll(name string) error {
+	var toRemove []string
+	for k := range fs.m {
+		if strings.HasPrefix(k, name) {
+			toRemove = append(toRemove, k)
+		}
+	}
+	for _, k := range toRemove {
+		delete(fs.m, k)
+	}
+	return nil
+}
+
+// Open returns a fake file in the open state.
+func (fs *fakeFs) Open(name string) (File, error) {
+	if _, found := fs.m[name]; !found {
+		return nil, fmt.Errorf("file %q cannot be opened", name)
+	}
+	return fs.m[name], nil
+}
+
+// CleanedAbs cannot fail.
+func (fs *fakeFs) CleanedAbs(path string) (ConfirmedDir, string, error) {
+	if fs.IsDir(path) {
+		return ConfirmedDir(path), "", nil
+	}
+	d := filepath.Dir(path)
+	if d == path {
+		return ConfirmedDir(d), "", nil
+	}
+	return ConfirmedDir(d), filepath.Base(path), nil
+}
+
+// Exists returns true if file is known.
+func (fs *fakeFs) Exists(name string) bool {
+	_, found := fs.m[name]
+	return found
+}
+
+// Glob returns the list of matching files
+func (fs *fakeFs) Glob(pattern string) ([]string, error) {
+	var result []string
+	for p := range fs.m {
+		if fs.pathMatch(p, pattern) {
+			result = append(result, p)
+		}
+	}
+	sort.Strings(result)
+	return result, nil
+}
+
+// IsDir returns true if the file exists and is a directory.
+func (fs *fakeFs) IsDir(name string) bool {
+	f, found := fs.m[name]
+	if found && f.dir {
+		return true
+	}
+	if !strings.HasSuffix(name, "/") {
+		name = name + "/"
+	}
+	for k := range fs.m {
+		if strings.HasPrefix(k, name) {
+			return true
+		}
+	}
+	return false
+}
+
+// ReadFile returns the content of a known file, or an error if the file is unknown.
+func (fs *fakeFs) ReadFile(name string) ([]byte, error) {
+	if ff, found := fs.m[name]; found {
+		return ff.content, nil
+	}
+	return nil, fmt.Errorf("cannot read file %q", name)
+}
+
+func (fs *fakeFs) ReadTestKustomization() ([]byte, error) {
+	return fs.ReadFile(constants.KustomizationFileNames[0])
+}
+
+// WriteFile stores the given content under the given name and always succeeds.
+func (fs *fakeFs) WriteFile(name string, c []byte) error {
+	ff := &FakeFile{}
+	ff.Write(c)
+	fs.m[name] = ff
+	return nil
+}
+
+// WriteTestKustomization writes a standard test file.
+func (fs *fakeFs) WriteTestKustomization() {
+	fs.WriteTestKustomizationWith([]byte(kustomizationContent))
+}
+
+// WriteTestKustomizationWith writes a standard test file.
+func (fs *fakeFs) WriteTestKustomizationWith(bytes []byte) {
+	fs.WriteFile(constants.KustomizationFileNames[0], bytes)
+}
+
+func (fs *fakeFs) pathMatch(path, pattern string) bool {
+	match, _ := filepath.Match(pattern, path)
+	return match
+}
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/fs/fs.go b/vendor/sigs.k8s.io/kustomize/pkg/fs/fs.go
new file mode 100644
index 00000000..4b47dba6
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/fs/fs.go
@@ -0,0 +1,44 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package fs provides a file system abstraction layer.
+package fs
+
+import (
+	"io"
+	"os"
+)
+
+// FileSystem groups basic os filesystem methods.
+type FileSystem interface {
+	Create(name string) (File, error)
+	Mkdir(name string) error
+	MkdirAll(name string) error
+	RemoveAll(name string) error
+	Open(name string) (File, error)
+	IsDir(name string) bool
+	CleanedAbs(path string) (ConfirmedDir, string, error)
+	Exists(name string) bool
+	Glob(pattern string) ([]string, error)
+	ReadFile(name string) ([]byte, error)
+	WriteFile(name string, data []byte) error
+}
+
+// File groups the basic os.File methods.
+type File interface {
+	io.ReadWriteCloser
+	Stat() (os.FileInfo, error)
+}
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/fs/realfile.go b/vendor/sigs.k8s.io/kustomize/pkg/fs/realfile.go
new file mode 100644
index 00000000..5bfec55a
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/fs/realfile.go
@@ -0,0 +1,40 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fs
+
+import (
+	"os"
+)
+
+var _ File = &realFile{}
+
+// realFile implements File using the local filesystem.
+type realFile struct {
+	file *os.File
+}
+
+// Close closes a file.
+func (f *realFile) Close() error { return f.file.Close() }
+
+// Read reads a file's content.
+func (f *realFile) Read(p []byte) (n int, err error) { return f.file.Read(p) }
+
+// Write writes bytes to a file
+func (f *realFile) Write(p []byte) (n int, err error) { return f.file.Write(p) }
+
+// Stat returns an interface which has all the information regarding the file.
+func (f *realFile) Stat() (os.FileInfo, error) { return f.file.Stat() }
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/fs/realfs.go b/vendor/sigs.k8s.io/kustomize/pkg/fs/realfs.go
new file mode 100644
index 00000000..11e5813b
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/fs/realfs.go
@@ -0,0 +1,122 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fs
+
+import (
+	"fmt"
+	"io/ioutil"
+	"log"
+	"os"
+	"path/filepath"
+)
+
+var _ FileSystem = realFS{}
+
+// realFS implements FileSystem using the local filesystem.
+type realFS struct{}
+
+// MakeRealFS makes an instance of realFS.
+func MakeRealFS() FileSystem {
+	return realFS{}
+}
+
+// Create delegates to os.Create.
+func (realFS) Create(name string) (File, error) { return os.Create(name) }
+
+// Mkdir delegates to os.Mkdir.
+func (realFS) Mkdir(name string) error {
+	return os.Mkdir(name, 0777|os.ModeDir)
+}
+
+// MkdirAll delegates to os.MkdirAll.
+func (realFS) MkdirAll(name string) error {
+	return os.MkdirAll(name, 0777|os.ModeDir)
+}
+
+// RemoveAll delegates to os.RemoveAll.
+func (realFS) RemoveAll(name string) error {
+	return os.RemoveAll(name)
+}
+
+// Open delegates to os.Open.
+func (realFS) Open(name string) (File, error) { return os.Open(name) }
+
+// CleanedAbs returns a cleaned, absolute path
+// with no symbolic links split into directory
+// and file components.  If the entire path is
+// a directory, the file component is an empty
+// string.
+func (x realFS) CleanedAbs(
+	path string) (ConfirmedDir, string, error) {
+	absRoot, err := filepath.Abs(path)
+	if err != nil {
+		return "", "", fmt.Errorf(
+			"abs path error on '%s' : %v", path, err)
+	}
+	deLinked, err := filepath.EvalSymlinks(absRoot)
+	if err != nil {
+		return "", "", fmt.Errorf(
+			"evalsymlink failure on '%s' : %v", path, err)
+	}
+	if x.IsDir(deLinked) {
+		return ConfirmedDir(deLinked), "", nil
+	}
+	d := filepath.Dir(deLinked)
+	if !x.IsDir(d) {
+		// Programmer/assumption error.
+		log.Fatalf("first part of '%s' not a directory", deLinked)
+	}
+	if d == deLinked {
+		// Programmer/assumption error.
+		log.Fatalf("d '%s' should be a subset of deLinked", d)
+	}
+	f := filepath.Base(deLinked)
+	if filepath.Join(d, f) != deLinked {
+		// Programmer/assumption error.
+		log.Fatalf("these should be equal: '%s', '%s'",
+			filepath.Join(d, f), deLinked)
+	}
+	return ConfirmedDir(d), f, nil
+}
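+
+// A hedged example of the split (assuming /tmp/app exists, is not behind a
+// symlink, and contains the named file; the paths are illustrative only):
+//
+//	d, f, err := MakeRealFS().CleanedAbs("/tmp/app/kustomization.yaml")
+//	// d == "/tmp/app", f == "kustomization.yaml", err == nil
+//	d, f, err = MakeRealFS().CleanedAbs("/tmp/app")
+//	// d == "/tmp/app", f == "" (a directory has no file component)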
+
+// Exists returns true if os.Stat succeeds.
+func (realFS) Exists(name string) bool {
+	_, err := os.Stat(name)
+	return err == nil
+}
+
+// Glob returns the list of matching files
+func (realFS) Glob(pattern string) ([]string, error) {
+	return filepath.Glob(pattern)
+}
+
+// IsDir delegates to os.Stat and FileInfo.IsDir
+func (realFS) IsDir(name string) bool {
+	info, err := os.Stat(name)
+	if err != nil {
+		return false
+	}
+	return info.IsDir()
+}
+
+// ReadFile delegates to ioutil.ReadFile.
+func (realFS) ReadFile(name string) ([]byte, error) { return ioutil.ReadFile(name) }
+
+// WriteFile delegates to ioutil.WriteFile with read/write permissions.
+func (realFS) WriteFile(name string, c []byte) error {
+	return ioutil.WriteFile(name, c, 0666)
+}
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/git/cloner.go b/vendor/sigs.k8s.io/kustomize/pkg/git/cloner.go
new file mode 100644
index 00000000..465fdb1d
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/git/cloner.go
@@ -0,0 +1,75 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package git
+
+import (
+	"bytes"
+	"os/exec"
+
+	"github.com/pkg/errors"
+	"sigs.k8s.io/kustomize/pkg/fs"
+)
+
+// Cloner is a function that can clone a git repo.
+type Cloner func(repoSpec *RepoSpec) error
+
+// ClonerUsingGitExec uses a local git install, as opposed
+// to say, some remote API, to obtain a local clone of
+// a remote repo.
+func ClonerUsingGitExec(repoSpec *RepoSpec) error {
+	gitProgram, err := exec.LookPath("git")
+	if err != nil {
+		return errors.Wrap(err, "no 'git' program on path")
+	}
+	repoSpec.cloneDir, err = fs.NewTmpConfirmedDir()
+	if err != nil {
+		return err
+	}
+	cmd := exec.Command(
+		gitProgram,
+		"clone",
+		repoSpec.CloneSpec(),
+		repoSpec.cloneDir.String())
+	var out bytes.Buffer
+	cmd.Stdout = &out
+	err = cmd.Run()
+	if err != nil {
+		return errors.Wrapf(err, "trouble cloning %s", repoSpec.raw)
+	}
+	if repoSpec.ref == "" {
+		return nil
+	}
+	cmd = exec.Command(gitProgram, "checkout", repoSpec.ref)
+	cmd.Dir = repoSpec.cloneDir.String()
+	err = cmd.Run()
+	if err != nil {
+		return errors.Wrapf(
+			err, "trouble checking out href %s", repoSpec.ref)
+	}
+	return nil
+}
+
+// DoNothingCloner returns a cloner that only sets
+// cloneDir field in the repoSpec.  It's assumed that
+// the cloneDir is associated with some fake filesystem
+// used in a test.
+func DoNothingCloner(dir fs.ConfirmedDir) Cloner {
+	return func(rs *RepoSpec) error {
+		rs.cloneDir = dir
+		return nil
+	}
+}
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/git/repospec.go b/vendor/sigs.k8s.io/kustomize/pkg/git/repospec.go
new file mode 100644
index 00000000..b3251f65
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/git/repospec.go
@@ -0,0 +1,214 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package git
+
+import (
+	"fmt"
+	"path/filepath"
+	"strings"
+
+	"sigs.k8s.io/kustomize/pkg/fs"
+)
+
+// Used as a temporary non-empty occupant of the cloneDir
+// field, as something distinguishable from the empty string
+// in various outputs (especially tests). Not using an
+// actual directory name here, as that's a temporary directory
+// with a unique name that isn't created until clone time.
+const notCloned = fs.ConfirmedDir("/notCloned")
+
+// RepoSpec specifies a git repository and a branch and path therein.
+type RepoSpec struct {
+	// Raw, original spec, used to look for cycles.
+	// TODO(monopole): Drop raw, use processed fields instead.
+	raw string
+
+	// Host, e.g. github.com
+	host string
+
+	// orgRepo name (organization/repoName),
+	// e.g. kubernetes-sigs/kustomize
+	orgRepo string
+
+	// ConfirmedDir where the orgRepo is cloned to.
+	cloneDir fs.ConfirmedDir
+
+	// Relative path in the repository, and in the cloneDir,
+	// to a Kustomization.
+	path string
+
+	// Branch or tag reference.
+	ref string
+}
+
+// CloneSpec returns a string suitable for "git clone {spec}".
+func (x *RepoSpec) CloneSpec() string {
+	if isAzureHost(x.host) || isAWSHost(x.host) {
+		return x.host + x.orgRepo
+	}
+	return x.host + x.orgRepo + gitSuffix
+}
+
+func (x *RepoSpec) CloneDir() fs.ConfirmedDir {
+	return x.cloneDir
+}
+
+func (x *RepoSpec) Raw() string {
+	return x.raw
+}
+
+func (x *RepoSpec) AbsPath() string {
+	return x.cloneDir.Join(x.path)
+}
+
+func (x *RepoSpec) Cleaner(fSys fs.FileSystem) func() error {
+	return func() error { return fSys.RemoveAll(x.cloneDir.String()) }
+}
+
+// From strings like git@github.com:someOrg/someRepo.git or
+// https://github.com/someOrg/someRepo?ref=someHash, extract
+// the parts.
+func NewRepoSpecFromUrl(n string) (*RepoSpec, error) {
+	if filepath.IsAbs(n) {
+		return nil, fmt.Errorf("uri looks like abs path: %s", n)
+	}
+	host, orgRepo, path, gitRef := parseGithubUrl(n)
+	if orgRepo == "" {
+		return nil, fmt.Errorf("url lacks orgRepo: %s", n)
+	}
+	if host == "" {
+		return nil, fmt.Errorf("url lacks host: %s", n)
+	}
+	return &RepoSpec{
+		raw: n, host: host, orgRepo: orgRepo,
+		cloneDir: notCloned, path: path, ref: gitRef}, nil
+}
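+
+// A rough sketch of how a remote target string is decomposed (the URL below
+// is illustrative only):
+//
+//	spec, _ := NewRepoSpecFromUrl("https://github.com/someOrg/someRepo/someDir?ref=v1.0.6")
+//	// spec.host        == "https://github.com/"
+//	// spec.orgRepo     == "someOrg/someRepo"
+//	// spec.path        == "someDir"
+//	// spec.ref         == "v1.0.6"
+//	// spec.CloneSpec() == "https://github.com/someOrg/someRepo.git"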
+
+const (
+	refQuery  = "?ref="
+	gitSuffix = ".git"
+)
+
+// From strings like git@github.com:someOrg/someRepo.git or
+// https://github.com/someOrg/someRepo?ref=someHash, extract
+// the parts.
+func parseGithubUrl(n string) (
+	host string, orgRepo string, path string, gitRef string) {
+	host, n = parseHostSpec(n)
+
+	if strings.Contains(n, gitSuffix) {
+		index := strings.Index(n, gitSuffix)
+		orgRepo = n[0:index]
+		n = n[index+len(gitSuffix):]
+		path, gitRef = peelQuery(n)
+		return
+	}
+
+	i := strings.Index(n, "/")
+	if i < 1 {
+		return "", "", "", ""
+	}
+	j := strings.Index(n[i+1:], "/")
+	if j >= 0 {
+		j += i + 1
+		orgRepo = n[:j]
+		path, gitRef = peelQuery(n[j+1:])
+	} else {
+		path = ""
+		orgRepo, gitRef = peelQuery(n)
+	}
+	return
+}
+
+func peelQuery(arg string) (string, string) {
+	j := strings.Index(arg, refQuery)
+	if j >= 0 {
+		return arg[:j], arg[j+len(refQuery):]
+	}
+	return arg, ""
+}
+
+func parseHostSpec(n string) (string, string) {
+	var host string
+	// Start accumulating the host part.
+	for _, p := range []string{
+		// Order matters here.
+		"git::", "gh:", "ssh://", "https://", "http://",
+		"git@", "github.com:", "github.com/"} {
+		if len(p) < len(n) && strings.ToLower(n[:len(p)]) == p {
+			n = n[len(p):]
+			host += p
+		}
+	}
+	if host == "git@" {
+		i := strings.Index(n, "/")
+		if i > -1 {
+			host += n[:i+1]
+			n = n[i+1:]
+		} else {
+			i = strings.Index(n, ":")
+			if i > -1 {
+				host += n[:i+1]
+				n = n[i+1:]
+			}
+		}
+		return host, n
+	}
+
+	// If host is a http(s) or ssh URL, grab the domain part.
+	for _, p := range []string{
+		"ssh://", "https://", "http://"} {
+		if strings.HasSuffix(host, p) {
+			i := strings.Index(n, "/")
+			if i > -1 {
+				host = host + n[0:i+1]
+				n = n[i+1:]
+			}
+			break
+		}
+	}
+
+	return normalizeGitHostSpec(host), n
+}
+
+func normalizeGitHostSpec(host string) string {
+	s := strings.ToLower(host)
+	if strings.Contains(s, "github.com") {
+		if strings.Contains(s, "git@") || strings.Contains(s, "ssh:") {
+			host = "git@github.com:"
+		} else {
+			host = "https://github.com/"
+		}
+	}
+	if strings.HasPrefix(s, "git::") {
+		host = strings.TrimLeft(s, "git::")
+	}
+	return host
+}
+
+// The format of Azure repo URL is documented
+// https://docs.microsoft.com/en-us/azure/devops/repos/git/clone?view=vsts&tabs=visual-studio#clone_url
+func isAzureHost(host string) bool {
+	return strings.Contains(host, "dev.azure.com") ||
+		strings.Contains(host, "visualstudio.com")
+}
+
+// The format of AWS repo URL is documented
+// https://docs.aws.amazon.com/codecommit/latest/userguide/regions.html
+func isAWSHost(host string) bool {
+	return strings.Contains(host, "amazonaws.com")
+}
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/gvk/gvk.go b/vendor/sigs.k8s.io/kustomize/pkg/gvk/gvk.go
new file mode 100644
index 00000000..890c8e8b
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/gvk/gvk.go
@@ -0,0 +1,180 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package gvk
+
+import (
+	"strings"
+)
+
+// Gvk identifies a Kubernetes API type.
+// https://github.com/kubernetes/community/blob/master/contributors/design-proposals/api-machinery/api-group.md
+type Gvk struct {
+	Group   string `json:"group,omitempty" yaml:"group,omitempty"`
+	Version string `json:"version,omitempty" yaml:"version,omitempty"`
+	Kind    string `json:"kind,omitempty" yaml:"kind,omitempty"`
+}
+
+// FromKind makes a Gvk with only the kind specified.
+func FromKind(k string) Gvk {
+	return Gvk{
+		Kind: k,
+	}
+}
+
+// Values that are brief but meaningful in logs.
+const (
+	noGroup   = "~G"
+	noVersion = "~V"
+	noKind    = "~K"
+	separator = "_"
+)
+
+// String returns a string representation of the GVK.
+func (x Gvk) String() string {
+	g := x.Group
+	if g == "" {
+		g = noGroup
+	}
+	v := x.Version
+	if v == "" {
+		v = noVersion
+	}
+	k := x.Kind
+	if k == "" {
+		k = noKind
+	}
+	return strings.Join([]string{g, v, k}, separator)
+}
+
+// Equals returns true if the Gvk's have equal fields.
+func (x Gvk) Equals(o Gvk) bool {
+	return x.Group == o.Group && x.Version == o.Version && x.Kind == o.Kind
+}
+
+// An attempt to order things to help k8s, e.g.
+// a Service should come before things that refer to it.
+// Namespace should be first.
+// In some cases the order is specified only to provide determinism.
+var order = []string{
+	"Namespace",
+	"StorageClass",
+	"CustomResourceDefinition",
+	"MutatingWebhookConfiguration",
+	"ValidatingWebhookConfiguration",
+	"ServiceAccount",
+	"Role",
+	"ClusterRole",
+	"RoleBinding",
+	"ClusterRoleBinding",
+	"ConfigMap",
+	"Secret",
+	"Service",
+	"Deployment",
+	"StatefulSet",
+	"CronJob",
+	"PodDisruptionBudget",
+}
+var typeOrders = func() map[string]int {
+	m := map[string]int{}
+	for i, n := range order {
+		m[n] = i
+	}
+	return m
+}()
+
+// IsLessThan returns true if self is less than the argument.
+func (x Gvk) IsLessThan(o Gvk) bool {
+	indexI, foundI := typeOrders[x.Kind]
+	indexJ, foundJ := typeOrders[o.Kind]
+	if foundI && foundJ {
+		if indexI != indexJ {
+			return indexI < indexJ
+		}
+	}
+	if foundI && !foundJ {
+		return true
+	}
+	if !foundI && foundJ {
+		return false
+	}
+	return x.String() < o.String()
+}
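+
+// A small ordering sketch (illustrative only):
+//
+//	a := Gvk{Version: "v1", Kind: "Namespace"}
+//	b := Gvk{Group: "apps", Version: "v1", Kind: "Deployment"}
+//	c := Gvk{Version: "v1", Kind: "SomeCustomKind"}
+//	a.IsLessThan(b) // true: Namespace sorts before Deployment
+//	b.IsLessThan(c) // true: kinds in the order list sort before unknown kinds
+//	c.IsLessThan(b) // false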
+
+// IsSelected returns true if `selector` selects `x`; otherwise, false.
+// If `selector` and `x` are the same, return true.
+// If `selector` is nil, it is considered a wildcard match, returning true.
+// If selector fields are empty, they are considered wildcards matching
+// anything in the corresponding fields, e.g.
+//
+// this item:
+//       <Group: "extensions", Version: "v1beta1", Kind: "Deployment">
+//
+// is selected by
+//       <Group: "",           Version: "",        Kind: "Deployment">
+//
+// but rejected by
+//       <Group: "apps",       Version: "",        Kind: "Deployment">
+//
+func (x Gvk) IsSelected(selector *Gvk) bool {
+	if selector == nil {
+		return true
+	}
+	if len(selector.Group) > 0 {
+		if x.Group != selector.Group {
+			return false
+		}
+	}
+	if len(selector.Version) > 0 {
+		if x.Version != selector.Version {
+			return false
+		}
+	}
+	if len(selector.Kind) > 0 {
+		if x.Kind != selector.Kind {
+			return false
+		}
+	}
+	return true
+}
+
+var clusterLevelKinds = []string{
+	"APIService",
+	"ClusterRoleBinding",
+	"ClusterRole",
+	"CustomResourceDefinition",
+	"Namespace",
+	"PersistentVolume",
+}
+
+// IsClusterKind returns true if x is a cluster-level Gvk
+func (x Gvk) IsClusterKind() bool {
+	for _, k := range clusterLevelKinds {
+		if k == x.Kind {
+			return true
+		}
+	}
+	return false
+}
+
+// ClusterLevelGvks returns a slice of cluster-level Gvks
+func ClusterLevelGvks() []Gvk {
+	var result []Gvk
+	for _, k := range clusterLevelKinds {
+		result = append(result, Gvk{Kind: k})
+	}
+	return result
+}
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/ifc/ifc.go b/vendor/sigs.k8s.io/kustomize/pkg/ifc/ifc.go
new file mode 100644
index 00000000..e6267cae
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/ifc/ifc.go
@@ -0,0 +1,73 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package ifc holds miscellaneous interfaces used by kustomize.
+package ifc
+
+import (
+	"sigs.k8s.io/kustomize/pkg/gvk"
+	"sigs.k8s.io/kustomize/pkg/types"
+)
+
+// Validator provides functions to validate annotations and labels
+type Validator interface {
+	MakeAnnotationValidator() func(map[string]string) error
+	MakeLabelValidator() func(map[string]string) error
+	ValidateNamespace(string) []string
+}
+
+// Loader interface exposes methods to read bytes.
+type Loader interface {
+	// Root returns the root location for this Loader.
+	Root() string
+	// New returns Loader located at newRoot.
+	New(newRoot string) (Loader, error)
+	// Load returns the bytes read from the location or an error.
+	Load(location string) ([]byte, error)
+	// Cleanup cleans the loader
+	Cleanup() error
+}
+
+// Kunstructured allows manipulation of k8s objects
+// that do not have Golang structs.
+type Kunstructured interface {
+	Map() map[string]interface{}
+	SetMap(map[string]interface{})
+	Copy() Kunstructured
+	GetFieldValue(string) (string, error)
+	MarshalJSON() ([]byte, error)
+	UnmarshalJSON([]byte) error
+	GetGvk() gvk.Gvk
+	GetKind() string
+	GetName() string
+	SetName(string)
+	GetLabels() map[string]string
+	SetLabels(map[string]string)
+	GetAnnotations() map[string]string
+	SetAnnotations(map[string]string)
+}
+
+// KunstructuredFactory makes instances of Kunstructured.
+type KunstructuredFactory interface {
+	SliceFromBytes([]byte) ([]Kunstructured, error)
+	FromMap(m map[string]interface{}) Kunstructured
+	MakeConfigMap(args *types.ConfigMapArgs, options *types.GeneratorOptions) (Kunstructured, error)
+	MakeSecret(args *types.SecretArgs, options *types.GeneratorOptions) (Kunstructured, error)
+	Set(ldr Loader)
+}
+
+// SecretTypeOpaque mirrors the value of core.v1.SecretTypeOpaque.
+const SecretTypeOpaque = "Opaque"
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/ifc/transformer/factory.go b/vendor/sigs.k8s.io/kustomize/pkg/ifc/transformer/factory.go
new file mode 100644
index 00000000..0a74c280
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/ifc/transformer/factory.go
@@ -0,0 +1,29 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package transformer holds the transformer factory interface used by kustomize.
+package transformer
+
+import (
+	"sigs.k8s.io/kustomize/pkg/resource"
+	"sigs.k8s.io/kustomize/pkg/transformers"
+)
+
+// Factory makes transformers
+type Factory interface {
+	MakePatchTransformer(slice []*resource.Resource, rf *resource.Factory) (transformers.Transformer, error)
+	MakeHashTransformer() transformers.Transformer
+}
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/image/deprecatedimage.go b/vendor/sigs.k8s.io/kustomize/pkg/image/deprecatedimage.go
new file mode 100644
index 00000000..65db4051
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/image/deprecatedimage.go
@@ -0,0 +1,32 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package image
+
+// DeprecatedImage contains an image name and a new tag,
+// which will replace the original tag.
+//
+// Deprecated: use Image instead.
+type DeprecatedImage struct {
+	// Name is a tag-less image name.
+	Name string `json:"name,omitempty" yaml:"name,omitempty"`
+
+	// NewTag is the value to use in replacing the original tag.
+	NewTag string `json:"newTag,omitempty" yaml:"newTag,omitempty"`
+
+	// Digest is the value used to replace the original image tag.
+	// If Digest is present, the NewTag value is ignored.
+	Digest string `json:"digest,omitempty" yaml:"digest,omitempty"`
+}
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/image/image.go b/vendor/sigs.k8s.io/kustomize/pkg/image/image.go
new file mode 100644
index 00000000..dbe3b8b1
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/image/image.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package image provides struct definitions and libraries
+// for overwriting image names, tags, and digests.
+package image
+
+// Image contains an image name plus a new name, tag, or digest,
+// which will replace the original name and tag.
+type Image struct {
+	// Name is a tag-less image name.
+	Name string `json:"name,omitempty" yaml:"name,omitempty"`
+
+	// NewName is the value used to replace the original name.
+	NewName string `json:"newName,omitempty" yaml:"newName,omitempty"`
+
+	// NewTag is the value used to replace the original tag.
+	NewTag string `json:"newTag,omitempty" yaml:"newTag,omitempty"`
+
+	// Digest is the value used to replace the original image tag.
+	// If Digest is present, the NewTag value is ignored.
+	Digest string `json:"digest,omitempty" yaml:"digest,omitempty"`
+}
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/internal/error/configmaperror.go b/vendor/sigs.k8s.io/kustomize/pkg/internal/error/configmaperror.go
new file mode 100644
index 00000000..1d60d78a
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/internal/error/configmaperror.go
@@ -0,0 +1,30 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package error has contextual error types.
+package error
+
+import "fmt"
+
+// ConfigmapError represents an error with a ConfigMap.
+type ConfigmapError struct {
+	Path     string
+	ErrorMsg string
+}
+
+func (e ConfigmapError) Error() string {
+	return fmt.Sprintf("Kustomization file [%s] encounters a configmap error: %s\n", e.Path, e.ErrorMsg)
+}
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/internal/error/kustomizationerror.go b/vendor/sigs.k8s.io/kustomize/pkg/internal/error/kustomizationerror.go
new file mode 100644
index 00000000..0d53ca9b
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/internal/error/kustomizationerror.go
@@ -0,0 +1,61 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package error
+
+import (
+	"fmt"
+)
+
+// KustomizationError represents an error with a kustomization.
+type KustomizationError struct {
+	KustomizationPath string
+	ErrorMsg          string
+}
+
+func (ke KustomizationError) Error() string {
+	return fmt.Sprintf("Kustomization File [%s]: %s\n", ke.KustomizationPath, ke.ErrorMsg)
+}
+
+// KustomizationErrors collects all errors.
+type KustomizationErrors struct {
+	kErrors []error
+}
+
+func (ke *KustomizationErrors) Error() string {
+	errormsg := ""
+	for _, e := range ke.kErrors {
+		errormsg += e.Error() + "\n"
+	}
+	return errormsg
+}
+
+// Append adds error to a collection of errors.
+func (ke *KustomizationErrors) Append(e error) {
+	ke.kErrors = append(ke.kErrors, e)
+}
+
+// Get returns all collected errors.
+func (ke *KustomizationErrors) Get() []error {
+	return ke.kErrors
+}
+
+// BatchAppend adds all errors from another KustomizationErrors
+func (ke *KustomizationErrors) BatchAppend(e KustomizationErrors) {
+	for _, err := range e.Get() {
+		ke.kErrors = append(ke.kErrors, err)
+	}
+}
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/internal/error/patcherror.go b/vendor/sigs.k8s.io/kustomize/pkg/internal/error/patcherror.go
new file mode 100644
index 00000000..60c9f80e
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/internal/error/patcherror.go
@@ -0,0 +1,32 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package error
+
+import (
+	"fmt"
+)
+
+// PatchError represents an error that occurred while applying a patch.
+type PatchError struct {
+	KustomizationPath string
+	PatchFilepath     string
+	ErrorMsg          string
+}
+
+func (e PatchError) Error() string {
+	return fmt.Sprintf("Kustomization file [%s] encounters a patch error for [%s]: %s\n", e.KustomizationPath, e.PatchFilepath, e.ErrorMsg)
+}
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/internal/error/resourceerror.go b/vendor/sigs.k8s.io/kustomize/pkg/internal/error/resourceerror.go
new file mode 100644
index 00000000..ef3566dd
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/internal/error/resourceerror.go
@@ -0,0 +1,30 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package error
+
+import "fmt"
+
+// ResourceError represents an error in a resource.
+type ResourceError struct {
+	KustomizationPath string
+	ResourceFilepath  string
+	ErrorMsg          string
+}
+
+func (e ResourceError) Error() string {
+	return fmt.Sprintf("Kustomization file [%s] encounters a resource error for [%s]: %s\n", e.KustomizationPath, e.ResourceFilepath, e.ErrorMsg)
+}
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/internal/error/secreterror.go b/vendor/sigs.k8s.io/kustomize/pkg/internal/error/secreterror.go
new file mode 100644
index 00000000..cd72759c
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/internal/error/secreterror.go
@@ -0,0 +1,30 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package error
+
+import "fmt"
+
+// SecretError represents an error with a Secret.
+type SecretError struct {
+	KustomizationPath string
+	// ErrorMsg is an error message
+	ErrorMsg string
+}
+
+func (e SecretError) Error() string {
+	return fmt.Sprintf("Kustomization file [%s] encounters a secret error: %s\n", e.KustomizationPath, e.ErrorMsg)
+}
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/internal/error/yamlformaterror.go b/vendor/sigs.k8s.io/kustomize/pkg/internal/error/yamlformaterror.go
new file mode 100644
index 00000000..4c27d30d
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/internal/error/yamlformaterror.go
@@ -0,0 +1,48 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package error has contextual error types.
+package error
+
+import (
+	"fmt"
+	"strings"
+)
+
+// YamlFormatError represents a JSON/YAML format error, together with the name of the offending YAML file.
+type YamlFormatError struct {
+	Path     string
+	ErrorMsg string
+}
+
+func (e YamlFormatError) Error() string {
+	return fmt.Sprintf("YAML file [%s] encounters a format error.\n%s\n", e.Path, e.ErrorMsg)
+}
+
+// Handler wraps YAML/JSON syntax errors in a YamlFormatError carrying the given path; other errors are returned unchanged.
+func Handler(e error, path string) error {
+	if isYAMLSyntaxError(e) {
+		return YamlFormatError{
+			Path:     path,
+			ErrorMsg: e.Error(),
+		}
+	}
+	return e
+}
+
+func isYAMLSyntaxError(e error) bool {
+	return strings.Contains(e.Error(), "error converting YAML to JSON") || strings.Contains(e.Error(), "error unmarshaling JSON")
+}
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/loader/fileloader.go b/vendor/sigs.k8s.io/kustomize/pkg/loader/fileloader.go
new file mode 100644
index 00000000..4fa5dca6
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/loader/fileloader.go
@@ -0,0 +1,312 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package loader
+
+import (
+	"fmt"
+	"log"
+	"path/filepath"
+	"strings"
+
+	"sigs.k8s.io/kustomize/pkg/fs"
+	"sigs.k8s.io/kustomize/pkg/git"
+	"sigs.k8s.io/kustomize/pkg/ifc"
+)
+
+// fileLoader is a kustomization's interface to files.
+//
+// The directory in which a kustomization file sits
+// is referred to below as the kustomization's root.
+//
+// An instance of fileLoader has an immutable root,
+// and offers a `New` method returning a new loader
+// with a new root.
+//
+// A kustomization file refers to two kinds of files:
+//
+// * supplemental data paths
+//
+//   `Load` is used to visit these paths.
+//
+//   They must terminate in or below the root.
+//
+//   They hold things like resources, patches,
+//   data for ConfigMaps, etc.
+//
+// * bases; other kustomizations
+//
+//   `New` is used to load bases.
+//
+//   A base can be either a remote git repo URL, or
+//   a directory specified relative to the current
+//   root. In the former case, the repo is locally
+//   cloned, and the new loader is rooted on a path
+//   in that clone.
+//
+//   As loaders create new loaders, a root history
+//   is established, and used to disallow:
+//
+//   - A base that is a repository that, in turn,
+//     specifies a base repository seen previously
+//     in the loading stack (a cycle).
+//
+//   - An overlay depending on a base positioned at
+//     or above it.  I.e. '../foo' is OK, but '.',
+//     '..', '../..', etc. are disallowed.  Allowing
+//     such a base has no advantages and encourages
+//     cycles, particularly if some future change
+//     were to introduce globbing to file
+//     specifications in the kustomization file.
+//
+// These restrictions assure that kustomizations
+// are self-contained and relocatable, and impose
+// some safety when relying on remote kustomizations,
+// e.g. a ConfigMap generator specified to read
+// from /etc/passwd will fail.
+//
+type fileLoader struct {
+	// Loader that spawned this loader.
+	// Used to avoid cycles.
+	referrer *fileLoader
+	// An absolute, cleaned path to a directory.
+	// The Load function reads from this directory,
+	// or directories below it.
+	root fs.ConfirmedDir
+	// If this is non-nil, the files were
+	// obtained from the given repository.
+	repoSpec *git.RepoSpec
+	// File system utilities.
+	fSys fs.FileSystem
+	// Used to clone repositories.
+	cloner git.Cloner
+	// Used to clean up, as needed.
+	cleaner func() error
+}
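+
+// Illustrative usage sketch (not part of the upstream package; the paths and
+// variable names are hypothetical):
+//
+//	ldr := NewFileLoaderAtCwd(fSys)           // rooted at "."
+//	base, err := ldr.New("../base")           // a sibling base directory is allowed
+//	data, err := ldr.Load("deployment.yaml")  // must resolve in or below the root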
+
+// NewFileLoaderAtCwd returns a loader that loads from ".".
+func NewFileLoaderAtCwd(fSys fs.FileSystem) *fileLoader {
+	return newLoaderOrDie(fSys, ".")
+}
+
+// NewFileLoaderAtRoot returns a loader that loads from "/".
+func NewFileLoaderAtRoot(fSys fs.FileSystem) *fileLoader {
+	return newLoaderOrDie(fSys, string(filepath.Separator))
+}
+
+// Root returns the absolute path that is prepended to any
+// relative paths used in Load.
+func (l *fileLoader) Root() string {
+	return l.root.String()
+}
+
+func newLoaderOrDie(fSys fs.FileSystem, path string) *fileLoader {
+	root, err := demandDirectoryRoot(fSys, path)
+	if err != nil {
+		log.Fatalf("unable to make loader at '%s'; %v", path, err)
+	}
+	return newLoaderAtConfirmedDir(
+		root, fSys, nil, git.ClonerUsingGitExec)
+}
+
+// newLoaderAtConfirmedDir returns a new fileLoader with given root.
+func newLoaderAtConfirmedDir(
+	root fs.ConfirmedDir, fSys fs.FileSystem,
+	referrer *fileLoader, cloner git.Cloner) *fileLoader {
+	return &fileLoader{
+		root:     root,
+		referrer: referrer,
+		fSys:     fSys,
+		cloner:   cloner,
+		cleaner:  func() error { return nil },
+	}
+}
+
+// Assure that the given path is in fact a directory.
+func demandDirectoryRoot(
+	fSys fs.FileSystem, path string) (fs.ConfirmedDir, error) {
+	if path == "" {
+		return "", fmt.Errorf(
+			"loader root cannot be empty")
+	}
+	d, f, err := fSys.CleanedAbs(path)
+	if err != nil {
+		return "", fmt.Errorf(
+			"absolute path error in '%s' : %v", path, err)
+	}
+	if f != "" {
+		return "", fmt.Errorf(
+			"got file '%s', but '%s' must be a directory to be a root",
+			f, path)
+	}
+	return d, nil
+}
+
+// New returns a new Loader, rooted relative to current loader,
+// or rooted in a temp directory holding a git repo clone.
+func (l *fileLoader) New(path string) (ifc.Loader, error) {
+	if path == "" {
+		return nil, fmt.Errorf("new root cannot be empty")
+	}
+	repoSpec, err := git.NewRepoSpecFromUrl(path)
+	if err == nil {
+		// Treat this as git repo clone request.
+		if err := l.errIfRepoCycle(repoSpec); err != nil {
+			return nil, err
+		}
+		return newLoaderAtGitClone(repoSpec, l.fSys, l.referrer, l.cloner)
+	}
+	if filepath.IsAbs(path) {
+		return nil, fmt.Errorf("new root '%s' cannot be absolute", path)
+	}
+	root, err := demandDirectoryRoot(l.fSys, l.root.Join(path))
+	if err != nil {
+		return nil, err
+	}
+	if err := l.errIfGitContainmentViolation(root); err != nil {
+		return nil, err
+	}
+	if err := l.errIfArgEqualOrHigher(root); err != nil {
+		return nil, err
+	}
+	return newLoaderAtConfirmedDir(
+		root, l.fSys, l, l.cloner), nil
+}
+
+// newLoaderAtGitClone returns a new Loader pinned to a temporary
+// directory holding a cloned git repo.
+func newLoaderAtGitClone(
+	repoSpec *git.RepoSpec, fSys fs.FileSystem,
+	referrer *fileLoader, cloner git.Cloner) (ifc.Loader, error) {
+	err := cloner(repoSpec)
+	if err != nil {
+		return nil, err
+	}
+	root, f, err := fSys.CleanedAbs(repoSpec.AbsPath())
+	if err != nil {
+		return nil, err
+	}
+	// We don't know that the path requested in repoSpec
+	// is a directory until we actually clone it and look
+	// inside.  That just happened, hence the error check
+	// is here.
+	if f != "" {
+		return nil, fmt.Errorf(
+			"'%s' refers to file '%s'; expecting directory",
+			repoSpec.AbsPath(), f)
+	}
+	return &fileLoader{
+		root:     root,
+		referrer: referrer,
+		repoSpec: repoSpec,
+		fSys:     fSys,
+		cloner:   cloner,
+		cleaner:  repoSpec.Cleaner(fSys),
+	}, nil
+}
+
+func (l *fileLoader) errIfGitContainmentViolation(
+	base fs.ConfirmedDir) error {
+	containingRepo := l.containingRepo()
+	if containingRepo == nil {
+		return nil
+	}
+	if !base.HasPrefix(containingRepo.CloneDir()) {
+		return fmt.Errorf(
+			"security; bases in kustomizations found in "+
+				"cloned git repos must be within the repo, "+
+				"but base '%s' is outside '%s'",
+			base, containingRepo.CloneDir())
+	}
+	return nil
+}
+
+// Looks back through referrers for a git repo, returning nil
+// if none found.
+func (l *fileLoader) containingRepo() *git.RepoSpec {
+	if l.repoSpec != nil {
+		return l.repoSpec
+	}
+	if l.referrer == nil {
+		return nil
+	}
+	return l.referrer.containingRepo()
+}
+
+// errIfArgEqualOrHigher tests whether the argument is equal to
+// or above the root of this loader or any of its ancestors.
+func (l *fileLoader) errIfArgEqualOrHigher(
+	candidateRoot fs.ConfirmedDir) error {
+	if l.root.HasPrefix(candidateRoot) {
+		return fmt.Errorf(
+			"cycle detected: candidate root '%s' contains visited root '%s'",
+			candidateRoot, l.root)
+	}
+	if l.referrer == nil {
+		return nil
+	}
+	return l.referrer.errIfArgEqualOrHigher(candidateRoot)
+}
+
+// TODO(monopole): Distinguish branches?
+// I.e. Allow a distinction between git URI with
+// path foo and tag bar and a git URI with the same
+// path but a different tag?
+func (l *fileLoader) errIfRepoCycle(newRepoSpec *git.RepoSpec) error {
+	// TODO(monopole): Use parsed data instead of Raw().
+	if l.repoSpec != nil &&
+		strings.HasPrefix(l.repoSpec.Raw(), newRepoSpec.Raw()) {
+		return fmt.Errorf(
+			"cycle detected: URI '%s' referenced by previous URI '%s'",
+			newRepoSpec.Raw(), l.repoSpec.Raw())
+	}
+	if l.referrer == nil {
+		return nil
+	}
+	return l.referrer.errIfRepoCycle(newRepoSpec)
+}
+
+// Load returns content of file at the given relative path,
+// else an error.  The path must refer to a file in or
+// below the current root.
+func (l *fileLoader) Load(path string) ([]byte, error) {
+	if filepath.IsAbs(path) {
+		return nil, l.loadOutOfBounds(path)
+	}
+	d, f, err := l.fSys.CleanedAbs(l.root.Join(path))
+	if err != nil {
+		return nil, err
+	}
+	if f == "" {
+		return nil, fmt.Errorf(
+			"'%s' must be a file (got d='%s')", path, d)
+	}
+	if !d.HasPrefix(l.root) {
+		return nil, l.loadOutOfBounds(path)
+	}
+	return l.fSys.ReadFile(d.Join(f))
+}
+
+func (l *fileLoader) loadOutOfBounds(path string) error {
+	return fmt.Errorf(
+		"security; file '%s' is not in or below '%s'",
+		path, l.root)
+}
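+
+// Illustrative behavior (hypothetical paths): relative paths that stay under
+// the root are readable, while absolute paths and paths escaping the root
+// are rejected:
+//
+//	ldr.Load("config/app.yaml")   // ok, resolves below the root
+//	ldr.Load("/etc/passwd")       // error: absolute path
+//	ldr.Load("../outside.yaml")   // error: not in or below the root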
+
+// Cleanup runs the cleaner.
+func (l *fileLoader) Cleanup() error {
+	return l.cleaner()
+}
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/loader/loader.go b/vendor/sigs.k8s.io/kustomize/pkg/loader/loader.go
new file mode 100644
index 00000000..53de6553
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/loader/loader.go
@@ -0,0 +1,39 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package loader has a data loading interface and various implementations.
+package loader
+
+import (
+	"sigs.k8s.io/kustomize/pkg/fs"
+	"sigs.k8s.io/kustomize/pkg/git"
+	"sigs.k8s.io/kustomize/pkg/ifc"
+)
+
+// NewLoader returns a Loader.
+func NewLoader(path string, fSys fs.FileSystem) (ifc.Loader, error) {
+	repoSpec, err := git.NewRepoSpecFromUrl(path)
+	if err == nil {
+		return newLoaderAtGitClone(
+			repoSpec, fSys, nil, git.ClonerUsingGitExec)
+	}
+	root, err := demandDirectoryRoot(fSys, path)
+	if err != nil {
+		return nil, err
+	}
+	return newLoaderAtConfirmedDir(
+		root, fSys, nil, git.ClonerUsingGitExec), nil
+}
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/patch/json6902.go b/vendor/sigs.k8s.io/kustomize/pkg/patch/json6902.go
new file mode 100644
index 00000000..9ddb1faa
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/patch/json6902.go
@@ -0,0 +1,40 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package patch
+
+import "sigs.k8s.io/kustomize/pkg/gvk"
+
+// Json6902 represents a json patch for an object
+// with the format documented at https://tools.ietf.org/html/rfc6902.
+type Json6902 struct {
+	// Target refers to a Kubernetes object that the json patch will be
+	// applied to. It must refer to a Kubernetes resource under the
+	// purview of this kustomization. Target should use the
+	// raw name of the object (the name specified in its YAML,
+	// before addition of a namePrefix and a nameSuffix).
+	Target *Target `json:"target" yaml:"target"`
+
+	// Path is a relative file path to a JSON patch file inside a kustomization.
+	Path string `json:"path,omitempty" yaml:"path,omitempty"`
+}
+
+// Target represents the kubernetes object that the patch is applied to
+type Target struct {
+	gvk.Gvk   `json:",inline,omitempty" yaml:",inline,omitempty"`
+	Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"`
+	Name      string `json:"name" yaml:"name"`
+}
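+
+// Illustrative kustomization.yaml entry (hypothetical values; the field names
+// follow the struct tags above and the transformer's error messages):
+//
+//	patchesJson6902:
+//	- target:
+//	    group: apps
+//	    version: v1
+//	    kind: Deployment
+//	    name: my-deployment
+//	  path: set_replicas.yaml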
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/patch/strategicmerge.go b/vendor/sigs.k8s.io/kustomize/pkg/patch/strategicmerge.go
new file mode 100644
index 00000000..596cc346
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/patch/strategicmerge.go
@@ -0,0 +1,40 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package patch
+
+// StrategicMerge represents a relative path to a
+// strategic merge patch with the format documented at
+// https://github.com/kubernetes/community/blob/master/contributors/devel/strategic-merge-patch.md
+type StrategicMerge string
+
+// Append appends a slice of patch paths to a StrategicMerge slice
+func Append(patches []StrategicMerge, paths ...string) []StrategicMerge {
+	for _, p := range paths {
+		patches = append(patches, StrategicMerge(p))
+	}
+	return patches
+}
+
+// Exist determines if a patch path exists in a slice of StrategicMerge
+func Exist(patches []StrategicMerge, path string) bool {
+	for _, p := range patches {
+		if p == StrategicMerge(path) {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/patch/transformer/factory.go b/vendor/sigs.k8s.io/kustomize/pkg/patch/transformer/factory.go
new file mode 100644
index 00000000..b373dfb7
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/patch/transformer/factory.go
@@ -0,0 +1,83 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package transformer
+
+import (
+	"fmt"
+	"sigs.k8s.io/kustomize/pkg/ifc"
+	"sigs.k8s.io/kustomize/pkg/resid"
+
+	"sigs.k8s.io/kustomize/pkg/gvk"
+	"sigs.k8s.io/kustomize/pkg/patch"
+	"sigs.k8s.io/kustomize/pkg/transformers"
+)
+
+// PatchJson6902Factory makes Json6902 transformers
+type PatchJson6902Factory struct {
+	loader ifc.Loader
+}
+
+// NewPatchJson6902Factory returns a new PatchJson6902Factory.
+func NewPatchJson6902Factory(l ifc.Loader) PatchJson6902Factory {
+	return PatchJson6902Factory{loader: l}
+}
+
+// MakePatchJson6902Transformer returns a transformer for applying Json6902 patches.
+func (f PatchJson6902Factory) MakePatchJson6902Transformer(patches []patch.Json6902) (transformers.Transformer, error) {
+	var ts []transformers.Transformer
+	for _, p := range patches {
+		t, err := f.makeOnePatchJson6902Transformer(p)
+		if err != nil {
+			return nil, err
+		}
+		if t != nil {
+			ts = append(ts, t)
+		}
+	}
+	return transformers.NewMultiTransformerWithConflictCheck(ts), nil
+}
+
+func (f PatchJson6902Factory) makeOnePatchJson6902Transformer(p patch.Json6902) (transformers.Transformer, error) {
+	if p.Target == nil {
+		return nil, fmt.Errorf("must specify the target field in patchesJson6902")
+	}
+	if p.Path == "" {
+		return nil, fmt.Errorf("must specify the path for a json patch file")
+	}
+
+	targetId := resid.NewResIdWithPrefixNamespace(
+		gvk.Gvk{
+			Group:   p.Target.Group,
+			Version: p.Target.Version,
+			Kind:    p.Target.Kind,
+		},
+		p.Target.Name,
+		"",
+		p.Target.Namespace,
+	)
+
+	rawOp, err := f.loader.Load(p.Path)
+	if err != nil {
+		return nil, err
+	}
+
+	return newPatchJson6902JSONTransformer(targetId, rawOp)
+}
+
+// isJsonFormat reports whether the raw patch looks like a JSON array
+// (the RFC 6902 document form); empty input is treated as non-JSON.
+func isJsonFormat(data []byte) bool {
+	return len(data) > 0 && data[0] == '['
+}
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/patch/transformer/patchjson6902json.go b/vendor/sigs.k8s.io/kustomize/pkg/patch/transformer/patchjson6902json.go
new file mode 100644
index 00000000..1f09939d
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/patch/transformer/patchjson6902json.go
@@ -0,0 +1,108 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package transformer
+
+import (
+	"fmt"
+
+	"github.com/evanphx/json-patch"
+	"github.com/ghodss/yaml"
+	"github.com/pkg/errors"
+	"sigs.k8s.io/kustomize/pkg/resid"
+	"sigs.k8s.io/kustomize/pkg/resmap"
+	"sigs.k8s.io/kustomize/pkg/resource"
+	"sigs.k8s.io/kustomize/pkg/transformers"
+)
+
+// patchJson6902JSONTransformer applies patches.
+type patchJson6902JSONTransformer struct {
+	target resid.ResId
+	patch  jsonpatch.Patch
+	rawOp  []byte
+}
+
+var _ transformers.Transformer = &patchJson6902JSONTransformer{}
+
+// newPatchJson6902JSONTransformer constructs a PatchJson6902 transformer.
+func newPatchJson6902JSONTransformer(
+	id resid.ResId, rawOp []byte) (transformers.Transformer, error) {
+	op := rawOp
+	var err error
+	if !isJsonFormat(op) {
+		// if it isn't JSON, try to parse it as YAML
+		op, err = yaml.YAMLToJSON(rawOp)
+		if err != nil {
+			return nil, err
+		}
+	}
+	decodedPatch, err := jsonpatch.DecodePatch(op)
+	if err != nil {
+		return nil, err
+	}
+	if len(decodedPatch) == 0 {
+		return transformers.NewNoOpTransformer(), nil
+	}
+	return &patchJson6902JSONTransformer{target: id, patch: decodedPatch, rawOp: rawOp}, nil
+}
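+
+// For reference, the raw patch may be JSON or YAML; both of the following
+// illustrative payloads decode to the same single RFC 6902 operation:
+//
+//	[{"op": "replace", "path": "/spec/replicas", "value": 3}]
+//
+//	- op: replace
+//	  path: /spec/replicas
+//	  value: 3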
+
+// Transform applies the JSON patch on top of the matching base resource.
+func (t *patchJson6902JSONTransformer) Transform(m resmap.ResMap) error {
+	obj, err := t.findTargetObj(m)
+	if err != nil {
+		return err
+	}
+	rawObj, err := obj.MarshalJSON()
+	if err != nil {
+		return err
+	}
+	modifiedObj, err := t.patch.Apply(rawObj)
+	if err != nil {
+		return errors.Wrapf(err, "failed to apply json patch '%s'", string(t.rawOp))
+	}
+	err = obj.UnmarshalJSON(modifiedObj)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func (t *patchJson6902JSONTransformer) findTargetObj(
+	m resmap.ResMap) (*resource.Resource, error) {
+	var matched []resid.ResId
+	// TODO(monopole): namespace bug in json patch?
+	// Since introduction in PR #300
+	// (see pkg/patch/transformer/util.go),
+	// this code has treated an empty namespace like a wildcard
+	// rather than like an additional restriction to match
+	// only the empty namespace.  No test coverage to confirm.
+	// Not sure if desired, keeping it for now.
+	if t.target.Namespace() != "" {
+		matched = m.GetMatchingIds(t.target.NsGvknEquals)
+	} else {
+		matched = m.GetMatchingIds(t.target.GvknEquals)
+	}
+	if len(matched) == 0 {
+		return nil, fmt.Errorf(
+			"couldn't find target %v for json patch", t.target)
+	}
+	if len(matched) > 1 {
+		return nil, fmt.Errorf(
+			"found multiple targets %v matching %v for json patch",
+			matched, t.target)
+	}
+	return m[matched[0]], nil
+}
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/resid/resid.go b/vendor/sigs.k8s.io/kustomize/pkg/resid/resid.go
new file mode 100644
index 00000000..dbf9a3e9
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/resid/resid.go
@@ -0,0 +1,207 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resid
+
+import (
+	"strings"
+
+	"sigs.k8s.io/kustomize/pkg/gvk"
+)
+
+// ResId is an immutable identifier of a k8s resource object.
+type ResId struct {
+	// Gvk of the resource.
+	gvKind gvk.Gvk
+
+	// name of the resource before transformation.
+	name string
+
+	// namePrefix of the resource.
+	// An untransformed resource has no prefix.
+	// A fully transformed resource has an arbitrary
+	// number of prefixes concatenated together.
+	prefix string
+
+	// nameSuffix of the resource.
+	// An untransformed resource has no suffix.
+	// A fully transformed resource has an arbitrary
+	// number of suffixes concatenated together.
+	suffix string
+
+	// Namespace the resource belongs to.
+	// An untransformed resource has no namespace.
+	// A fully transformed resource has the namespace
+	// from the top most overlay.
+	namespace string
+}
+
+// NewResIdWithPrefixSuffixNamespace creates new resource identifier with a prefix, suffix and a namespace
+func NewResIdWithPrefixSuffixNamespace(k gvk.Gvk, n, p, s, ns string) ResId {
+	return ResId{gvKind: k, name: n, prefix: p, suffix: s, namespace: ns}
+}
+
+// NewResIdWithPrefixNamespace creates new resource identifier with a prefix and a namespace
+func NewResIdWithPrefixNamespace(k gvk.Gvk, n, p, ns string) ResId {
+	return ResId{gvKind: k, name: n, prefix: p, namespace: ns}
+}
+
+// NewResIdWithSuffixNamespace creates new resource identifier with a suffix and a namespace
+func NewResIdWithSuffixNamespace(k gvk.Gvk, n, s, ns string) ResId {
+	return ResId{gvKind: k, name: n, suffix: s, namespace: ns}
+}
+
+// NewResIdWithPrefixSuffix creates new resource identifier with a prefix and suffix
+func NewResIdWithPrefixSuffix(k gvk.Gvk, n, p, s string) ResId {
+	return ResId{gvKind: k, name: n, prefix: p, suffix: s}
+}
+
+// NewResId creates new resource identifier
+func NewResId(k gvk.Gvk, n string) ResId {
+	return ResId{gvKind: k, name: n}
+}
+
+// NewResIdKindOnly creates new resource identifier
+func NewResIdKindOnly(k string, n string) ResId {
+	return ResId{gvKind: gvk.FromKind(k), name: n}
+}
+
+const (
+	noNamespace = "~X"
+	noPrefix    = "~P"
+	noName      = "~N"
+	noSuffix    = "~S"
+	separator   = "|"
+)
+
+// String of ResId based on GVK, name and prefix
+func (n ResId) String() string {
+	ns := n.namespace
+	if ns == "" {
+		ns = noNamespace
+	}
+	p := n.prefix
+	if p == "" {
+		p = noPrefix
+	}
+	nm := n.name
+	if nm == "" {
+		nm = noName
+	}
+	s := n.suffix
+	if s == "" {
+		s = noSuffix
+	}
+
+	return strings.Join(
+		[]string{n.gvKind.String(), ns, p, nm, s}, separator)
+}
+
+// GvknString of ResId based on GVK and name
+func (n ResId) GvknString() string {
+	return n.gvKind.String() + separator + n.name
+}
+
+// GvknEquals returns true if the other id matches
+// Group/Version/Kind/name.
+func (n ResId) GvknEquals(id ResId) bool {
+	return n.name == id.name && n.gvKind.Equals(id.gvKind)
+}
+
+// NsGvknEquals returns true if the other id matches
+// namespace/Group/Version/Kind/name.
+func (n ResId) NsGvknEquals(id ResId) bool {
+	return n.namespace == id.namespace && n.GvknEquals(id)
+}
+
+// Gvk returns Group/Version/Kind of the resource.
+func (n ResId) Gvk() gvk.Gvk {
+	return n.gvKind
+}
+
+// Name returns resource name.
+func (n ResId) Name() string {
+	return n.name
+}
+
+// Namespace returns resource namespace.
+func (n ResId) Namespace() string {
+	return n.namespace
+}
+
+// CopyWithNewPrefixSuffix makes a new copy of the current ResId
+// and appends a new prefix and suffix.
+func (n ResId) CopyWithNewPrefixSuffix(p, s string) ResId {
+	result := n
+	if p != "" {
+		result.prefix = n.concatPrefix(p)
+	}
+	if s != "" {
+		result.suffix = n.concatSuffix(s)
+	}
+	return result
+}
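+
+// Illustrative example (not part of the upstream package): prefixes and
+// suffixes accumulate with a ":" separator (see concatPrefix/concatSuffix below):
+//
+//	id := NewResIdWithPrefixSuffix(gvk.FromKind("Deployment"), "app", "dev-", "")
+//	id2 := id.CopyWithNewPrefixSuffix("prod-", "-v2")
+//	// id2 has prefix "prod-:dev-" and suffix "-v2"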
+
+// CopyWithNewNamespace makes a new copy of the current ResId and sets a new namespace.
+func (n ResId) CopyWithNewNamespace(ns string) ResId {
+	result := n
+	result.namespace = ns
+	return result
+}
+
+// HasSameLeftmostPrefix checks whether two ResIds have the same
+// leftmost prefix.
+func (n ResId) HasSameLeftmostPrefix(id ResId) bool {
+	prefixes1 := n.prefixList()
+	prefixes2 := id.prefixList()
+	return prefixes1[0] == prefixes2[0]
+}
+
+// HasSameRightmostSuffix checks whether two ResIds have the same
+// rightmost suffix.
+func (n ResId) HasSameRightmostSuffix(id ResId) bool {
+	suffixes1 := n.suffixList()
+	suffixes2 := id.suffixList()
+	return suffixes1[len(suffixes1)-1] == suffixes2[len(suffixes2)-1]
+}
+
+func (n ResId) concatPrefix(p string) string {
+	if p == "" {
+		return n.prefix
+	}
+	if n.prefix == "" {
+		return p
+	}
+	return p + ":" + n.prefix
+}
+
+func (n ResId) concatSuffix(s string) string {
+	if s == "" {
+		return n.suffix
+	}
+	if n.suffix == "" {
+		return s
+	}
+	return n.suffix + ":" + s
+}
+
+func (n ResId) prefixList() []string {
+	return strings.Split(n.prefix, ":")
+}
+
+func (n ResId) suffixList() []string {
+	return strings.Split(n.suffix, ":")
+}
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/resmap/factory.go b/vendor/sigs.k8s.io/kustomize/pkg/resmap/factory.go
new file mode 100644
index 00000000..923cde23
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/resmap/factory.go
@@ -0,0 +1,123 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resmap
+
+import (
+	"fmt"
+
+	"github.com/pkg/errors"
+	"sigs.k8s.io/kustomize/pkg/ifc"
+	internal "sigs.k8s.io/kustomize/pkg/internal/error"
+	"sigs.k8s.io/kustomize/pkg/resource"
+	"sigs.k8s.io/kustomize/pkg/types"
+)
+
+// Factory makes instances of ResMap.
+type Factory struct {
+	resF *resource.Factory
+}
+
+// NewFactory returns a new resmap.Factory.
+func NewFactory(rf *resource.Factory) *Factory {
+	return &Factory{resF: rf}
+}
+
+// RF returns a resource.Factory.
+func (rmF *Factory) RF() *resource.Factory {
+	return rmF.resF
+}
+
+// FromFiles returns a ResMap given a resource path slice.
+func (rmF *Factory) FromFiles(
+	loader ifc.Loader, paths []string) (ResMap, error) {
+	var result []ResMap
+	for _, path := range paths {
+		content, err := loader.Load(path)
+		if err != nil {
+			return nil, errors.Wrap(err, "Load from path "+path+" failed")
+		}
+		res, err := rmF.NewResMapFromBytes(content)
+		if err != nil {
+			return nil, internal.Handler(err, path)
+		}
+		result = append(result, res)
+	}
+	return MergeWithErrorOnIdCollision(result...)
+}
+
+// NewResMapFromBytes decodes a list of objects from a byte slice.
+func (rmF *Factory) NewResMapFromBytes(b []byte) (ResMap, error) {
+	resources, err := rmF.resF.SliceFromBytes(b)
+	if err != nil {
+		return nil, err
+	}
+
+	result := ResMap{}
+	for _, res := range resources {
+		id := res.Id()
+		if _, found := result[id]; found {
+			return result, fmt.Errorf("GroupVersionKindName: %#v already exists in the map", id)
+		}
+		result[id] = res
+	}
+	return result, nil
+}
+
+// NewResMapFromConfigMapArgs returns a ResMap given
+// a ConfigMap metadata slice from the kustomization file.
+func (rmF *Factory) NewResMapFromConfigMapArgs(argList []types.ConfigMapArgs, options *types.GeneratorOptions) (ResMap, error) {
+	var resources []*resource.Resource
+	for _, args := range argList {
+		res, err := rmF.resF.MakeConfigMap(&args, options)
+		if err != nil {
+			return nil, errors.Wrap(err, "NewResMapFromConfigMapArgs")
+		}
+		resources = append(resources, res)
+	}
+	return newResMapFromResourceSlice(resources)
+}
+
+// NewResMapFromSecretArgs takes a SecretArgs slice, generates
+// secrets from each entry, and accumulates them in a ResMap.
+func (rmF *Factory) NewResMapFromSecretArgs(argsList []types.SecretArgs, options *types.GeneratorOptions) (ResMap, error) {
+	var resources []*resource.Resource
+	for _, args := range argsList {
+		res, err := rmF.resF.MakeSecret(&args, options)
+		if err != nil {
+			return nil, errors.Wrap(err, "NewResMapFromSecretArgs")
+		}
+		resources = append(resources, res)
+	}
+	return newResMapFromResourceSlice(resources)
+}
+
+// Set sets the loader for the underlying factory
+func (rmF *Factory) Set(ldr ifc.Loader) {
+	rmF.resF.Set(ldr)
+}
+
+func newResMapFromResourceSlice(resources []*resource.Resource) (ResMap, error) {
+	result := ResMap{}
+	for _, res := range resources {
+		id := res.Id()
+		if _, found := result[id]; found {
+			return nil, fmt.Errorf("duplicated %#v is not allowed", id)
+		}
+		result[id] = res
+	}
+	return result, nil
+}
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/resmap/idslice.go b/vendor/sigs.k8s.io/kustomize/pkg/resmap/idslice.go
new file mode 100644
index 00000000..cdf75920
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/resmap/idslice.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resmap
+
+import (
+	"sort"
+
+	"sigs.k8s.io/kustomize/pkg/resid"
+)
+
+// IdSlice implements the sort interface.
+type IdSlice []resid.ResId
+
+var _ sort.Interface = IdSlice{}
+
+func (a IdSlice) Len() int      { return len(a) }
+func (a IdSlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a IdSlice) Less(i, j int) bool {
+	if !a[i].Gvk().Equals(a[j].Gvk()) {
+		return a[i].Gvk().IsLessThan(a[j].Gvk())
+	}
+	return a[i].String() < a[j].String()
+}
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/resmap/resmap.go b/vendor/sigs.k8s.io/kustomize/pkg/resmap/resmap.go
new file mode 100644
index 00000000..ca1e7239
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/resmap/resmap.go
@@ -0,0 +1,200 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package resmap implements a map from ResId to Resource that tracks all resources in a kustomization.
+package resmap
+
+import (
+	"bytes"
+	"fmt"
+	"reflect"
+	"sort"
+
+	"github.com/ghodss/yaml"
+	"sigs.k8s.io/kustomize/pkg/resid"
+	"sigs.k8s.io/kustomize/pkg/resource"
+	"sigs.k8s.io/kustomize/pkg/types"
+)
+
+// ResMap is a map from ResId to Resource.
+type ResMap map[resid.ResId]*resource.Resource
+
+type IdMatcher func(resid.ResId) bool
+
+// GetMatchingIds returns a slice of ResId keys from the map
+// that all satisfy the given matcher function.
+func (m ResMap) GetMatchingIds(matches IdMatcher) []resid.ResId {
+	var result []resid.ResId
+	for id := range m {
+		if matches(id) {
+			result = append(result, id)
+		}
+	}
+	return result
+}
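+
+// Illustrative usage (target is a hypothetical resid.ResId):
+//
+//	ids := m.GetMatchingIds(target.GvknEquals)
+//	// ids holds every key in m with the same Group/Version/Kind and name as target.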
+
+// EncodeAsYaml encodes a ResMap to YAML; encoded objects separated by `---`.
+func (m ResMap) EncodeAsYaml() ([]byte, error) {
+	var ids []resid.ResId
+	for id := range m {
+		ids = append(ids, id)
+	}
+	sort.Sort(IdSlice(ids))
+
+	firstObj := true
+	var b []byte
+	buf := bytes.NewBuffer(b)
+	for _, id := range ids {
+		obj := m[id]
+		out, err := yaml.Marshal(obj.Map())
+		if err != nil {
+			return nil, err
+		}
+		if firstObj {
+			firstObj = false
+		} else {
+			_, err = buf.WriteString("---\n")
+			if err != nil {
+				return nil, err
+			}
+		}
+		_, err = buf.Write(out)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return buf.Bytes(), nil
+}
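+
+// The result is a multi-document YAML stream, for example (illustrative):
+//
+//	apiVersion: v1
+//	kind: Namespace
+//	metadata:
+//	  name: example
+//	---
+//	apiVersion: v1
+//	kind: Service
+//	metadata:
+//	  name: example-svc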
+
+// ErrorIfNotEqual returns error if maps are not equal.
+func (m ResMap) ErrorIfNotEqual(m2 ResMap) error {
+	if len(m) != len(m2) {
+		var keySet1 []resid.ResId
+		var keySet2 []resid.ResId
+		for id := range m {
+			keySet1 = append(keySet1, id)
+		}
+		for id := range m2 {
+			keySet2 = append(keySet2, id)
+		}
+		return fmt.Errorf("maps has different number of entries: %#v doesn't equals %#v", keySet1, keySet2)
+	}
+	for id, obj1 := range m {
+		obj2, found := m2[id]
+		if !found {
+			return fmt.Errorf("%#v doesn't exist in %#v", id, m2)
+		}
+		if !reflect.DeepEqual(obj1, obj2) {
+			return fmt.Errorf("%#v doesn't deep equal %#v", obj1, obj2)
+		}
+	}
+	return nil
+}
+
+// DeepCopy clones the ResMap into a new one.
+func (m ResMap) DeepCopy(rf *resource.Factory) ResMap {
+	mcopy := make(ResMap)
+	for id, obj := range m {
+		mcopy[id] = obj.DeepCopy()
+	}
+	return mcopy
+}
+
+// FilterBy returns a subset ResMap whose entries share the inputId's
+// namespace, leftmost name prefix, and rightmost name suffix; cluster-level
+// entries are always retained. If inputId is a cluster-level resource, this
+// returns the original ResMap.
+func (m ResMap) FilterBy(inputId resid.ResId) ResMap {
+	if inputId.Gvk().IsClusterKind() {
+		return m
+	}
+	result := ResMap{}
+	for id, res := range m {
+		if id.Gvk().IsClusterKind() || id.Namespace() == inputId.Namespace() &&
+			id.HasSameLeftmostPrefix(inputId) &&
+			id.HasSameRightmostSuffix(inputId) {
+			result[id] = res
+		}
+	}
+	return result
+}
+
+// MergeWithErrorOnIdCollision combines multiple ResMap instances, failing on
+// key collision and skipping nil maps.
+// If all of the maps are nil, an empty ResMap is returned.
+func MergeWithErrorOnIdCollision(maps ...ResMap) (ResMap, error) {
+	result := ResMap{}
+	for _, m := range maps {
+		if m == nil {
+			continue
+		}
+		for id, res := range m {
+			if _, found := result[id]; found {
+				return nil, fmt.Errorf("id '%q' already used", id)
+			}
+			result[id] = res
+		}
+	}
+	return result, nil
+}
+
+// MergeWithOverride combines multiple ResMap instances, allowing and sometimes
+// demanding certain collisions and skipping nil maps.
+// A collision would be demanded, say, when a generated ConfigMap has the
+// "replace" option in its generation instructions, meaning it is supposed
+// to replace something from the raw resources list.
+// If all of the maps are nil, an empty ResMap is returned.
+// When looping over the instances to combine them, if a resource id for
+// resource X is found to be already in the combined map, then the behavior
+// field for X must be BehaviorMerge or BehaviorReplace.  If X is not in the
+// map, then its behavior cannot be merge or replace.
+func MergeWithOverride(maps ...ResMap) (ResMap, error) {
+	result := maps[0]
+	if result == nil {
+		result = ResMap{}
+	}
+	for _, m := range maps[1:] {
+		if m == nil {
+			continue
+		}
+		for id, r := range m {
+			matchedId := result.GetMatchingIds(id.GvknEquals)
+			if len(matchedId) == 1 {
+				id = matchedId[0]
+				switch r.Behavior() {
+				case types.BehaviorReplace:
+					r.Replace(result[id])
+					result[id] = r
+				case types.BehaviorMerge:
+					r.Merge(result[id])
+					result[id] = r
+				default:
+					return nil, fmt.Errorf("id %#v exists; must merge or replace", id)
+				}
+			} else if len(matchedId) == 0 {
+				switch r.Behavior() {
+				case types.BehaviorMerge, types.BehaviorReplace:
+					return nil, fmt.Errorf("id %#v does not exist; cannot merge or replace", id)
+				default:
+					result[id] = r
+				}
+			} else {
+				return nil, fmt.Errorf("merge conflict, found multiple objects %v the Resmap %v can merge into", matchedId, id)
+			}
+		}
+	}
+	return result, nil
+}
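+
+// Illustrative usage (hypothetical variable names): raw resources are merged
+// with generated resources whose behavior is BehaviorMerge or BehaviorReplace
+// on a colliding Group/Version/Kind/name; any other collision is an error:
+//
+//	combined, err := MergeWithOverride(rawResources, generatedResources)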
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/resource/factory.go b/vendor/sigs.k8s.io/kustomize/pkg/resource/factory.go
new file mode 100644
index 00000000..148323dd
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/resource/factory.go
@@ -0,0 +1,148 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"strings"
+
+	"sigs.k8s.io/kustomize/pkg/ifc"
+	internal "sigs.k8s.io/kustomize/pkg/internal/error"
+	"sigs.k8s.io/kustomize/pkg/patch"
+	"sigs.k8s.io/kustomize/pkg/types"
+)
+
+// Factory makes instances of Resource.
+type Factory struct {
+	kf ifc.KunstructuredFactory
+}
+
+// NewFactory makes an instance of Factory.
+func NewFactory(kf ifc.KunstructuredFactory) *Factory {
+	return &Factory{kf: kf}
+}
+
+// FromMap returns a new instance of Resource.
+func (rf *Factory) FromMap(m map[string]interface{}) *Resource {
+	return &Resource{
+		Kunstructured: rf.kf.FromMap(m),
+		options:       types.NewGenArgs(nil, nil),
+	}
+}
+
+// FromMapAndOption returns a new instance of Resource with given options.
+func (rf *Factory) FromMapAndOption(m map[string]interface{}, args *types.GeneratorArgs, option *types.GeneratorOptions) *Resource {
+	return &Resource{
+		Kunstructured: rf.kf.FromMap(m),
+		options:       types.NewGenArgs(args, option),
+	}
+}
+
+// FromKunstructured returns a new instance of Resource.
+func (rf *Factory) FromKunstructured(
+	u ifc.Kunstructured) *Resource {
+	if u == nil {
+		log.Fatal("unstruct ifc must not be null")
+	}
+	return &Resource{
+		Kunstructured: u,
+		options:       types.NewGenArgs(nil, nil),
+	}
+}
+
+// SliceFromPatches returns a slice of resources given a patch path
+// slice from a kustomization file.
+func (rf *Factory) SliceFromPatches(
+	ldr ifc.Loader, paths []patch.StrategicMerge) ([]*Resource, error) {
+	var result []*Resource
+	for _, path := range paths {
+		content, err := ldr.Load(string(path))
+		if err != nil {
+			return nil, err
+		}
+		res, err := rf.SliceFromBytes(content)
+		if err != nil {
+			return nil, internal.Handler(err, string(path))
+		}
+		result = append(result, res...)
+	}
+	return result, nil
+}
+
+// SliceFromBytes unmarshals bytes into a Resource slice.
+func (rf *Factory) SliceFromBytes(in []byte) ([]*Resource, error) {
+	kunStructs, err := rf.kf.SliceFromBytes(in)
+	if err != nil {
+		return nil, err
+	}
+	var result []*Resource
+	for len(kunStructs) > 0 {
+		u := kunStructs[0]
+		kunStructs = kunStructs[1:]
+		if strings.HasSuffix(u.GetKind(), "List") {
+			items := u.Map()["items"]
+			itemsSlice, ok := items.([]interface{})
+			if !ok {
+				if items == nil {
+					// an empty list
+					continue
+				}
+				return nil, fmt.Errorf("items in List is type %T, expected array", items)
+			}
+			for _, item := range itemsSlice {
+				itemJSON, err := json.Marshal(item)
+				if err != nil {
+					return nil, err
+				}
+				innerU, err := rf.kf.SliceFromBytes(itemJSON)
+				if err != nil {
+					return nil, err
+				}
+				// append innerU to kunStructs so nested Lists can be handled
+				kunStructs = append(kunStructs, innerU...)
+			}
+		} else {
+			result = append(result, rf.FromKunstructured(u))
+		}
+	}
+	return result, nil
+}
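+
+// Illustrative input (hypothetical): a v1 List is flattened, so the following
+// document yields two Resources, one ConfigMap and one Secret:
+//
+//	apiVersion: v1
+//	kind: List
+//	items:
+//	- apiVersion: v1
+//	  kind: ConfigMap
+//	  metadata: {name: cm}
+//	- apiVersion: v1
+//	  kind: Secret
+//	  metadata: {name: sec}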
+
+// Set sets the loader for the underlying factory
+func (rf *Factory) Set(ldr ifc.Loader) {
+	rf.kf.Set(ldr)
+}
+
+// MakeConfigMap makes an instance of Resource for ConfigMap
+func (rf *Factory) MakeConfigMap(args *types.ConfigMapArgs, options *types.GeneratorOptions) (*Resource, error) {
+	u, err := rf.kf.MakeConfigMap(args, options)
+	if err != nil {
+		return nil, err
+	}
+	return &Resource{Kunstructured: u, options: types.NewGenArgs(&types.GeneratorArgs{Behavior: args.Behavior}, options)}, nil
+}
+
+// MakeSecret makes an instance of Resource for Secret
+func (rf *Factory) MakeSecret(args *types.SecretArgs, options *types.GeneratorOptions) (*Resource, error) {
+	u, err := rf.kf.MakeSecret(args, options)
+	if err != nil {
+		return nil, err
+	}
+	return &Resource{Kunstructured: u, options: types.NewGenArgs(&types.GeneratorArgs{Behavior: args.Behavior}, options)}, nil
+}
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/resource/resource.go b/vendor/sigs.k8s.io/kustomize/pkg/resource/resource.go
new file mode 100644
index 00000000..1e0e3764
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/resource/resource.go
@@ -0,0 +1,107 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package resource implements representations of k8s API resources as "unstructured" objects.
+package resource
+
+import (
+	"strings"
+
+	"sigs.k8s.io/kustomize/pkg/ifc"
+	"sigs.k8s.io/kustomize/pkg/resid"
+	"sigs.k8s.io/kustomize/pkg/types"
+)
+
+// Resource is a map representation of a Kubernetes API resource object
+// paired with a GenerationBehavior.
+type Resource struct {
+	ifc.Kunstructured
+	options *types.GenArgs
+}
+
+// String returns resource as JSON.
+func (r *Resource) String() string {
+	bs, err := r.MarshalJSON()
+	if err != nil {
+		return "<" + err.Error() + ">"
+	}
+	return strings.TrimSpace(string(bs)) + r.options.String()
+}
+
+// DeepCopy returns a new copy of resource
+func (r *Resource) DeepCopy() *Resource {
+	return &Resource{
+		Kunstructured: r.Kunstructured.Copy(),
+		options:       r.options,
+	}
+}
+
+// Behavior returns the behavior for the resource.
+func (r *Resource) Behavior() types.GenerationBehavior {
+	return r.options.Behavior()
+}
+
+// NeedHashSuffix checks if the resource needs a hash suffix
+func (r *Resource) NeedHashSuffix() bool {
+	return r.options != nil && r.options.NeedsHashSuffix()
+}
+
+// Id returns the ResId for the resource.
+func (r *Resource) Id() resid.ResId {
+	namespace, _ := r.GetFieldValue("metadata.namespace")
+	return resid.NewResIdWithPrefixNamespace(r.GetGvk(), r.GetName(), "", namespace)
+}
+
+// Merge performs merge with other resource.
+func (r *Resource) Merge(other *Resource) {
+	r.Replace(other)
+	mergeConfigmap(r.Map(), other.Map(), r.Map())
+}
+
+// Replace performs replace with other resource.
+func (r *Resource) Replace(other *Resource) {
+	r.SetLabels(mergeStringMaps(other.GetLabels(), r.GetLabels()))
+	r.SetAnnotations(
+		mergeStringMaps(other.GetAnnotations(), r.GetAnnotations()))
+	r.SetName(other.GetName())
+	r.options = other.options
+}
+
+// TODO: Add BinaryData once we sync to new k8s.io/api
+func mergeConfigmap(
+	mergedTo map[string]interface{},
+	maps ...map[string]interface{}) {
+	mergedMap := map[string]interface{}{}
+	for _, m := range maps {
+		datamap, ok := m["data"].(map[string]interface{})
+		if ok {
+			for key, value := range datamap {
+				mergedMap[key] = value
+			}
+		}
+	}
+	mergedTo["data"] = mergedMap
+}
+
+func mergeStringMaps(maps ...map[string]string) map[string]string {
+	result := map[string]string{}
+	for _, m := range maps {
+		for key, value := range m {
+			result[key] = value
+		}
+	}
+	return result
+}
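+
+// Illustrative note (not part of the upstream source): in both helpers
+// above, later arguments win on key conflicts, e.g.
+//
+//   mergeStringMaps(
+//     map[string]string{"app": "old", "tier": "web"},
+//     map[string]string{"app": "new"})
+//
+// returns {"app": "new", "tier": "web"}. Merge therefore passes the current
+// resource's map last, so its data keys take precedence over those of the
+// resource being merged in.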
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/target/kusttarget.go b/vendor/sigs.k8s.io/kustomize/pkg/target/kusttarget.go
new file mode 100644
index 00000000..f136b268
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/target/kusttarget.go
@@ -0,0 +1,315 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package target implements state for the set of all resources to customize.
+package target
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"strings"
+
+	"github.com/ghodss/yaml"
+	"github.com/pkg/errors"
+	"sigs.k8s.io/kustomize/pkg/constants"
+	"sigs.k8s.io/kustomize/pkg/ifc"
+	"sigs.k8s.io/kustomize/pkg/ifc/transformer"
+	interror "sigs.k8s.io/kustomize/pkg/internal/error"
+	patchtransformer "sigs.k8s.io/kustomize/pkg/patch/transformer"
+	"sigs.k8s.io/kustomize/pkg/resmap"
+	"sigs.k8s.io/kustomize/pkg/resource"
+	"sigs.k8s.io/kustomize/pkg/transformers"
+	"sigs.k8s.io/kustomize/pkg/transformers/config"
+	"sigs.k8s.io/kustomize/pkg/types"
+)
+
+// KustTarget encapsulates the entirety of a kustomization build.
+type KustTarget struct {
+	kustomization *types.Kustomization
+	ldr           ifc.Loader
+	rFactory      *resmap.Factory
+	tFactory      transformer.Factory
+}
+
+// NewKustTarget returns a new instance of KustTarget primed with a Loader.
+func NewKustTarget(
+	ldr ifc.Loader,
+	rFactory *resmap.Factory,
+	tFactory transformer.Factory) (*KustTarget, error) {
+	content, err := loadKustFile(ldr)
+	if err != nil {
+		return nil, err
+	}
+	content = types.DealWithDeprecatedFields(content)
+	var k types.Kustomization
+	err = unmarshal(content, &k)
+	if err != nil {
+		return nil, err
+	}
+	errs := k.EnforceFields()
+	if len(errs) > 0 {
+		return nil, fmt.Errorf("Failed to read kustomization file under %s:\n"+strings.Join(errs, "\n"), ldr.Root())
+	}
+	return &KustTarget{
+		kustomization: &k,
+		ldr:           ldr,
+		rFactory:      rFactory,
+		tFactory:      tFactory,
+	}, nil
+}
+
+func quoted(l []string) []string {
+	r := make([]string, len(l))
+	for i, v := range l {
+		r[i] = "'" + v + "'"
+	}
+	return r
+}
+
+func commaOr(q []string) string {
+	return strings.Join(q[:len(q)-1], ", ") + " or " + q[len(q)-1]
+}
+
+func loadKustFile(ldr ifc.Loader) ([]byte, error) {
+	var content []byte
+	match := 0
+	for _, kf := range constants.KustomizationFileNames {
+		c, err := ldr.Load(kf)
+		if err == nil {
+			match += 1
+			content = c
+		}
+	}
+	switch match {
+	case 0:
+		return nil, fmt.Errorf(
+			"unable to find one of %v in directory '%s'",
+			commaOr(quoted(constants.KustomizationFileNames)), ldr.Root())
+	case 1:
+		return content, nil
+	default:
+		return nil, fmt.Errorf("Found multiple kustomization files under: %s\n", ldr.Root())
+	}
+}
+
+func unmarshal(y []byte, o interface{}) error {
+	j, err := yaml.YAMLToJSON(y)
+	if err != nil {
+		return err
+	}
+	dec := json.NewDecoder(bytes.NewReader(j))
+	dec.DisallowUnknownFields()
+	return dec.Decode(o)
+}
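+
+// Illustrative note (not part of the upstream source): because the decoder
+// above calls DisallowUnknownFields, a kustomization file containing a
+// misspelled key fails to load rather than being silently ignored, e.g.
+//
+//   namePrefix: dev-
+//   resourcess:        # typo, should be "resources"
+//   - deployment.yaml
+//
+// yields an error naming the unknown field "resourcess".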
+
+// MakeCustomizedResMap creates a ResMap per kustomization instructions.
+// The Resources in the returned ResMap are fully customized.
+func (kt *KustTarget) MakeCustomizedResMap() (resmap.ResMap, error) {
+	ra, err := kt.AccumulateTarget()
+	if err != nil {
+		return nil, err
+	}
+	err = ra.Transform(kt.tFactory.MakeHashTransformer())
+	if err != nil {
+		return nil, err
+	}
+	// Given that names have changed (prefixes/suffixes added),
+	// fix all the back references to those names.
+	err = ra.FixBackReferences()
+	if err != nil {
+		return nil, err
+	}
+	// With all the back references fixed, it's OK to resolve Vars.
+	err = ra.ResolveVars()
+	return ra.ResMap(), err
+}
+
+func (kt *KustTarget) shouldAddHashSuffixesToGeneratedResources() bool {
+	return kt.kustomization.GeneratorOptions == nil ||
+		!kt.kustomization.GeneratorOptions.DisableNameSuffixHash
+}
+
+// AccumulateTarget returns a new ResAccumulator,
+// holding customized resources and the data/rules used
+// to do so.  The name back references and vars are
+// not yet fixed.
+func (kt *KustTarget) AccumulateTarget() (
+	ra *ResAccumulator, err error) {
+	// TODO(monopole): Get rid of the KustomizationErrors accumulator.
+	// It's not consistently used, and complicates tests.
+	errs := &interror.KustomizationErrors{}
+	ra, errs = kt.accumulateBases()
+	resources, err := kt.rFactory.FromFiles(
+		kt.ldr, kt.kustomization.Resources)
+	if err != nil {
+		errs.Append(errors.Wrap(err, "rawResources failed to read Resources"))
+	}
+	if len(errs.Get()) > 0 {
+		return ra, errs
+	}
+	err = ra.MergeResourcesWithErrorOnIdCollision(resources)
+	if err != nil {
+		errs.Append(errors.Wrap(err, "MergeResourcesWithErrorOnIdCollision"))
+	}
+	tConfig, err := config.MakeTransformerConfig(
+		kt.ldr, kt.kustomization.Configurations)
+	if err != nil {
+		return nil, err
+	}
+	err = ra.MergeConfig(tConfig)
+	if err != nil {
+		errs.Append(errors.Wrap(err, "MergeConfig"))
+	}
+	err = ra.MergeVars(kt.kustomization.Vars)
+	if err != nil {
+		errs.Append(errors.Wrap(err, "MergeVars"))
+	}
+	crdTc, err := config.LoadConfigFromCRDs(kt.ldr, kt.kustomization.Crds)
+	if err != nil {
+		errs.Append(errors.Wrap(err, "LoadCRDs"))
+	}
+	err = ra.MergeConfig(crdTc)
+	if err != nil {
+		errs.Append(errors.Wrap(err, "merge CRDs"))
+	}
+	resMap, err := kt.generateConfigMapsAndSecrets(errs)
+	if err != nil {
+		errs.Append(errors.Wrap(err, "generateConfigMapsAndSecrets"))
+	}
+	err = ra.MergeResourcesWithOverride(resMap)
+	if err != nil {
+		return nil, err
+	}
+	patches, err := kt.rFactory.RF().SliceFromPatches(
+		kt.ldr, kt.kustomization.PatchesStrategicMerge)
+	if err != nil {
+		errs.Append(errors.Wrap(err, "SliceFromPatches"))
+	}
+	if len(errs.Get()) > 0 {
+		return nil, errs
+	}
+	t, err := kt.newTransformer(patches, ra.tConfig)
+	if err != nil {
+		return nil, err
+	}
+	err = ra.Transform(t)
+	if err != nil {
+		return nil, err
+	}
+	return ra, nil
+}
+
+func (kt *KustTarget) generateConfigMapsAndSecrets(
+	errs *interror.KustomizationErrors) (resmap.ResMap, error) {
+	kt.rFactory.Set(kt.ldr)
+	cms, err := kt.rFactory.NewResMapFromConfigMapArgs(
+		kt.kustomization.ConfigMapGenerator, kt.kustomization.GeneratorOptions)
+	if err != nil {
+		errs.Append(errors.Wrap(err, "NewResMapFromConfigMapArgs"))
+	}
+	secrets, err := kt.rFactory.NewResMapFromSecretArgs(
+		kt.kustomization.SecretGenerator, kt.kustomization.GeneratorOptions)
+	if err != nil {
+		errs.Append(errors.Wrap(err, "NewResMapFromSecretArgs"))
+	}
+	return resmap.MergeWithErrorOnIdCollision(cms, secrets)
+}
+
+// accumulateBases returns a new ResAccumulator
+// holding customized resources and the data/rules
+// used to customize them, from only the _bases_
+// of this KustTarget.
+func (kt *KustTarget) accumulateBases() (
+	ra *ResAccumulator, errs *interror.KustomizationErrors) {
+	errs = &interror.KustomizationErrors{}
+	ra = MakeEmptyAccumulator()
+
+	for _, path := range kt.kustomization.Bases {
+		ldr, err := kt.ldr.New(path)
+		if err != nil {
+			errs.Append(errors.Wrap(err, "couldn't make loader for "+path))
+			continue
+		}
+		subKt, err := NewKustTarget(
+			ldr, kt.rFactory, kt.tFactory)
+		if err != nil {
+			errs.Append(errors.Wrap(err, "couldn't make target for "+path))
+			ldr.Cleanup()
+			continue
+		}
+		subRa, err := subKt.AccumulateTarget()
+		if err != nil {
+			errs.Append(errors.Wrap(err, "AccumulateTarget"))
+			ldr.Cleanup()
+			continue
+		}
+		err = ra.MergeAccumulator(subRa)
+		if err != nil {
+			errs.Append(errors.Wrap(err, path))
+		}
+		ldr.Cleanup()
+	}
+	return ra, errs
+}
+
+// newTransformer makes a Transformer that does a collection
+// of object transformations.
+func (kt *KustTarget) newTransformer(
+	patches []*resource.Resource, tConfig *config.TransformerConfig) (
+	transformers.Transformer, error) {
+	var r []transformers.Transformer
+	t, err := kt.tFactory.MakePatchTransformer(patches, kt.rFactory.RF())
+	if err != nil {
+		return nil, err
+	}
+	r = append(r, t)
+	r = append(r, transformers.NewNamespaceTransformer(
+		string(kt.kustomization.Namespace), tConfig.NameSpace))
+	t, err = transformers.NewNamePrefixSuffixTransformer(
+		string(kt.kustomization.NamePrefix),
+		string(kt.kustomization.NameSuffix),
+		tConfig.NamePrefix,
+	)
+	if err != nil {
+		return nil, err
+	}
+	r = append(r, t)
+	t, err = transformers.NewLabelsMapTransformer(
+		kt.kustomization.CommonLabels, tConfig.CommonLabels)
+	if err != nil {
+		return nil, err
+	}
+	r = append(r, t)
+	t, err = transformers.NewAnnotationsMapTransformer(
+		kt.kustomization.CommonAnnotations, tConfig.CommonAnnotations)
+	if err != nil {
+		return nil, err
+	}
+	r = append(r, t)
+	t, err = patchtransformer.NewPatchJson6902Factory(kt.ldr).
+		MakePatchJson6902Transformer(kt.kustomization.PatchesJson6902)
+	if err != nil {
+		return nil, err
+	}
+	r = append(r, t)
+	t, err = transformers.NewImageTransformer(kt.kustomization.Images)
+	if err != nil {
+		return nil, err
+	}
+	r = append(r, t)
+	return transformers.NewMultiTransformer(r), nil
+}
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/target/resaccumulator.go b/vendor/sigs.k8s.io/kustomize/pkg/target/resaccumulator.go
new file mode 100644
index 00000000..b8c45015
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/target/resaccumulator.go
@@ -0,0 +1,161 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package target
+
+import (
+	"fmt"
+	"log"
+	"strings"
+
+	"sigs.k8s.io/kustomize/pkg/resid"
+	"sigs.k8s.io/kustomize/pkg/resmap"
+	"sigs.k8s.io/kustomize/pkg/transformers"
+	"sigs.k8s.io/kustomize/pkg/transformers/config"
+	"sigs.k8s.io/kustomize/pkg/types"
+)
+
+// ResAccumulator accumulates resources and the rules
+// used to customize those resources.
+// TODO(monopole): Move to "accumulator" package and make members private.
+// This will make a better separation between KustTarget, which should
+// be mainly concerned with data loading, and this class, which could
+// become the home of all transformation data and logic.
+type ResAccumulator struct {
+	resMap  resmap.ResMap
+	tConfig *config.TransformerConfig
+	varSet  types.VarSet
+}
+
+func MakeEmptyAccumulator() *ResAccumulator {
+	ra := &ResAccumulator{}
+	ra.resMap = make(resmap.ResMap)
+	ra.tConfig = &config.TransformerConfig{}
+	ra.varSet = types.VarSet{}
+	return ra
+}
+
+// ResMap returns a copy of the internal resMap.
+func (ra *ResAccumulator) ResMap() resmap.ResMap {
+	result := make(resmap.ResMap)
+	for k, v := range ra.resMap {
+		result[k] = v
+	}
+	return result
+}
+
+// Vars returns a copy of underlying vars.
+func (ra *ResAccumulator) Vars() []types.Var {
+	return ra.varSet.Set()
+}
+
+func (ra *ResAccumulator) MergeResourcesWithErrorOnIdCollision(
+	resources resmap.ResMap) (err error) {
+	ra.resMap, err = resmap.MergeWithErrorOnIdCollision(
+		resources, ra.resMap)
+	return err
+}
+
+func (ra *ResAccumulator) MergeResourcesWithOverride(
+	resources resmap.ResMap) (err error) {
+	ra.resMap, err = resmap.MergeWithOverride(
+		ra.resMap, resources)
+	return err
+}
+
+func (ra *ResAccumulator) MergeConfig(
+	tConfig *config.TransformerConfig) (err error) {
+	ra.tConfig, err = ra.tConfig.Merge(tConfig)
+	return err
+}
+
+func (ra *ResAccumulator) MergeVars(incoming []types.Var) error {
+	return ra.varSet.MergeSlice(incoming)
+}
+
+func (ra *ResAccumulator) MergeAccumulator(other *ResAccumulator) (err error) {
+	err = ra.MergeResourcesWithErrorOnIdCollision(other.resMap)
+	if err != nil {
+		return err
+	}
+	err = ra.MergeConfig(other.tConfig)
+	if err != nil {
+		return err
+	}
+	return ra.varSet.MergeSet(&other.varSet)
+}
+
+// makeVarReplacementMap returns a map of Var names to
+// their final values. The values are strings intended
+// for substitution wherever $(var.Name) occurs.
+func (ra *ResAccumulator) makeVarReplacementMap() (map[string]string, error) {
+	result := map[string]string{}
+	for _, v := range ra.Vars() {
+		matched := ra.resMap.GetMatchingIds(
+			resid.NewResId(v.ObjRef.GVK(), v.ObjRef.Name).GvknEquals)
+		if len(matched) > 1 {
+			return nil, fmt.Errorf(
+				"found %d resId matches for var %s "+
+					"(unable to disambiguate)",
+				len(matched), v)
+		}
+		if len(matched) == 1 {
+			s, err := ra.resMap[matched[0]].GetFieldValue(v.FieldRef.FieldPath)
+			if err != nil {
+				return nil, fmt.Errorf(
+					"field specified in var '%v' "+
+						"not found in corresponding resource", v)
+			}
+			result[v.Name] = s
+		} else {
+			return nil, fmt.Errorf(
+				"var '%v' cannot be mapped to a field "+
+					"in the set of known resources", v)
+		}
+	}
+	return result, nil
+}
+
+func (ra *ResAccumulator) Transform(t transformers.Transformer) error {
+	return t.Transform(ra.resMap)
+}
+
+func (ra *ResAccumulator) ResolveVars() error {
+	replacementMap, err := ra.makeVarReplacementMap()
+	if err != nil {
+		return err
+	}
+	if len(replacementMap) == 0 {
+		return nil
+	}
+	t := transformers.NewRefVarTransformer(
+		replacementMap, ra.tConfig.VarReference)
+	err = ra.Transform(t)
+	if len(t.UnusedVars()) > 0 {
+		log.Printf(
+			"well-defined vars that were never replaced: %s\n",
+			strings.Join(t.UnusedVars(), ","))
+	}
+	return err
+}
+
+func (ra *ResAccumulator) FixBackReferences() (err error) {
+	if ra.tConfig.NameReference == nil {
+		return nil
+	}
+	return ra.Transform(transformers.NewNameReferenceTransformer(
+		ra.tConfig.NameReference))
+}
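+
+// Illustrative note (not part of the upstream source): given a var such as
+//
+//   vars:
+//   - name: MY_SERVICE_NAME
+//     objref:
+//       apiVersion: v1
+//       kind: Service
+//       name: my-service
+//
+// makeVarReplacementMap looks up the matching Service and reads the field
+// named by the var's fieldref (metadata.name by default), so the value
+// includes any name prefix/suffix already applied. ResolveVars then
+// substitutes $(MY_SERVICE_NAME) in the field paths listed under the
+// varReference configuration.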
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/transformers/config/defaultconfig/commonannotations.go b/vendor/sigs.k8s.io/kustomize/pkg/transformers/config/defaultconfig/commonannotations.go
new file mode 100644
index 00000000..27545590
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/transformers/config/defaultconfig/commonannotations.go
@@ -0,0 +1,60 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package defaultconfig
+
+const commonAnnotationFieldSpecs = `
+commonAnnotations:
+- path: metadata/annotations
+  create: true
+
+- path: spec/template/metadata/annotations
+  create: true
+  version: v1
+  kind: ReplicationController
+
+- path: spec/template/metadata/annotations
+  create: true
+  kind: Deployment
+
+- path: spec/template/metadata/annotations
+  create: true
+  kind: ReplicaSet
+
+- path: spec/template/metadata/annotations
+  create: true
+  kind: DaemonSet
+
+- path: spec/template/metadata/annotations
+  create: true
+  kind: StatefulSet
+
+- path: spec/template/metadata/annotations
+  create: true
+  group: batch
+  kind: Job
+
+- path: spec/jobTemplate/metadata/annotations
+  create: true
+  group: batch
+  kind: CronJob
+
+- path: spec/jobTemplate/spec/template/metadata/annotations
+  create: true
+  group: batch
+  kind: CronJob
+
+`
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/transformers/config/defaultconfig/commonlabels.go b/vendor/sigs.k8s.io/kustomize/pkg/transformers/config/defaultconfig/commonlabels.go
new file mode 100644
index 00000000..66943c1e
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/transformers/config/defaultconfig/commonlabels.go
@@ -0,0 +1,162 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package defaultconfig
+
+const commonLabelFieldSpecs = `
+commonLabels:
+- path: metadata/labels
+  create: true
+
+- path: spec/selector
+  create: true
+  version: v1
+  kind: Service
+
+- path: spec/selector
+  create: true
+  version: v1
+  kind: ReplicationController
+
+- path: spec/template/metadata/labels
+  create: true
+  version: v1
+  kind: ReplicationController
+
+- path: spec/selector/matchLabels
+  create: true
+  kind: Deployment
+
+- path: spec/template/metadata/labels
+  create: true
+  kind: Deployment
+
+- path: spec/template/spec/affinity/podAffinity/preferredDuringSchedulingIgnoredDuringExecution/podAffinityTerm/labelSelector/matchLabels
+  create: false
+  group: apps
+  kind: Deployment
+
+- path: spec/template/spec/affinity/podAffinity/requiredDuringSchedulingIgnoredDuringExecution/labelSelector/matchLabels
+  create: false
+  group: apps
+  kind: Deployment
+
+- path: spec/template/spec/affinity/podAntiAffinity/preferredDuringSchedulingIgnoredDuringExecution/podAffinityTerm/labelSelector/matchLabels
+  create: false
+  group: apps
+  kind: Deployment
+
+- path: spec/template/spec/affinity/podAntiAffinity/requiredDuringSchedulingIgnoredDuringExecution/labelSelector/matchLabels
+  create: false
+  group: apps
+  kind: Deployment
+
+- path: spec/selector/matchLabels
+  create: true
+  kind: ReplicaSet
+
+- path: spec/template/metadata/labels
+  create: true
+  kind: ReplicaSet
+
+- path: spec/selector/matchLabels
+  create: true
+  kind: DaemonSet
+
+- path: spec/template/metadata/labels
+  create: true
+  kind: DaemonSet
+
+- path: spec/selector/matchLabels
+  create: true
+  group: apps
+  kind: StatefulSet
+
+- path: spec/template/metadata/labels
+  create: true
+  group: apps
+  kind: StatefulSet
+
+- path: spec/template/spec/affinity/podAffinity/preferredDuringSchedulingIgnoredDuringExecution/podAffinityTerm/labelSelector/matchLabels
+  create: false
+  group: apps
+  kind: StatefulSet
+
+- path: spec/template/spec/affinity/podAffinity/requiredDuringSchedulingIgnoredDuringExecution/labelSelector/matchLabels
+  create: false
+  group: apps
+  kind: StatefulSet
+
+- path: spec/template/spec/affinity/podAntiAffinity/preferredDuringSchedulingIgnoredDuringExecution/podAffinityTerm/labelSelector/matchLabels
+  create: false
+  group: apps
+  kind: StatefulSet
+
+- path: spec/template/spec/affinity/podAntiAffinity/requiredDuringSchedulingIgnoredDuringExecution/labelSelector/matchLabels
+  create: false
+  group: apps
+  kind: StatefulSet
+
+- path: spec/volumeClaimTemplates/metadata/labels
+  create: true
+  group: apps
+  kind: StatefulSet
+
+- path: spec/selector/matchLabels
+  create: false
+  group: batch
+  kind: Job
+
+- path: spec/template/metadata/labels
+  create: true
+  group: batch
+  kind: Job
+
+- path: spec/jobTemplate/spec/selector/matchLabels
+  create: false
+  group: batch
+  kind: CronJob
+
+- path: spec/jobTemplate/metadata/labels
+  create: true
+  group: batch
+  kind: CronJob
+
+- path: spec/jobTemplate/spec/template/metadata/labels
+  create: true
+  group: batch
+  kind: CronJob
+
+- path: spec/selector/matchLabels
+  create: false
+  group: policy
+  kind: PodDisruptionBudget
+
+- path: spec/podSelector/matchLabels
+  create: false
+  group: networking.k8s.io
+  kind: NetworkPolicy
+
+- path: spec/ingress/from/podSelector/matchLabels
+  create: false
+  group: networking.k8s.io
+  kind: NetworkPolicy
+
+- path: spec/egress/to/podSelector/matchLabels
+  create: false
+  group: networking.k8s.io
+  kind: NetworkPolicy
+`
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/transformers/config/defaultconfig/defaultconfig.go b/vendor/sigs.k8s.io/kustomize/pkg/transformers/config/defaultconfig/defaultconfig.go
new file mode 100644
index 00000000..d96639a8
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/transformers/config/defaultconfig/defaultconfig.go
@@ -0,0 +1,49 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package defaultconfig provides the default
+// transformer configurations
+package defaultconfig
+
+import (
+	"bytes"
+)
+
+// GetDefaultFieldSpecs returns default fieldSpecs.
+func GetDefaultFieldSpecs() []byte {
+	configData := [][]byte{
+		[]byte(namePrefixFieldSpecs),
+		[]byte(commonLabelFieldSpecs),
+		[]byte(commonAnnotationFieldSpecs),
+		[]byte(namespaceFieldSpecs),
+		[]byte(varReferenceFieldSpecs),
+		[]byte(nameReferenceFieldSpecs),
+	}
+	return bytes.Join(configData, []byte("\n"))
+}
+
+// GetDefaultFieldSpecsAsMap returns default fieldSpecs
+// as a string->string map.
+func GetDefaultFieldSpecsAsMap() map[string]string {
+	result := make(map[string]string)
+	result["nameprefix"] = namePrefixFieldSpecs
+	result["commonlabels"] = commonLabelFieldSpecs
+	result["commonannotations"] = commonAnnotationFieldSpecs
+	result["namespace"] = namespaceFieldSpecs
+	result["varreference"] = varReferenceFieldSpecs
+	result["namereference"] = nameReferenceFieldSpecs
+	return result
+}
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/transformers/config/defaultconfig/nameprefix.go b/vendor/sigs.k8s.io/kustomize/pkg/transformers/config/defaultconfig/nameprefix.go
new file mode 100644
index 00000000..94fe07a4
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/transformers/config/defaultconfig/nameprefix.go
@@ -0,0 +1,24 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package defaultconfig
+
+const (
+	namePrefixFieldSpecs = `
+namePrefix:
+- path: metadata/name
+`
+)
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/transformers/config/defaultconfig/namereference.go b/vendor/sigs.k8s.io/kustomize/pkg/transformers/config/defaultconfig/namereference.go
new file mode 100644
index 00000000..35d4b7de
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/transformers/config/defaultconfig/namereference.go
@@ -0,0 +1,317 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package defaultconfig
+
+const (
+	nameReferenceFieldSpecs = `
+nameReference:
+- kind: Deployment
+  fieldSpecs:
+  - path: spec/scaleTargetRef/name
+    kind: HorizontalPodAutoscaler
+
+- kind: ReplicationController
+  fieldSpecs:
+  - path: spec/scaleTargetRef/name
+    kind: HorizontalPodAutoscaler
+
+- kind: ReplicaSet
+  fieldSpecs:
+  - path: spec/scaleTargetRef/name
+    kind: HorizontalPodAutoscaler
+
+- kind: ConfigMap
+  version: v1
+  fieldSpecs:
+  - path: spec/volumes/configMap/name
+    version: v1
+    kind: Pod
+  - path: spec/containers/env/valueFrom/configMapKeyRef/name
+    version: v1
+    kind: Pod
+  - path: spec/initContainers/env/valueFrom/configMapKeyRef/name
+    version: v1
+    kind: Pod
+  - path: spec/containers/envFrom/configMapRef/name
+    version: v1
+    kind: Pod
+  - path: spec/initContainers/envFrom/configMapRef/name
+    version: v1
+    kind: Pod
+  - path: spec/template/spec/volumes/configMap/name
+    kind: Deployment
+  - path: spec/template/spec/containers/env/valueFrom/configMapKeyRef/name
+    kind: Deployment
+  - path: spec/template/spec/initContainers/env/valueFrom/configMapKeyRef/name
+    kind: Deployment
+  - path: spec/template/spec/containers/envFrom/configMapRef/name
+    kind: Deployment
+  - path: spec/template/spec/initContainers/envFrom/configMapRef/name
+    kind: Deployment
+  - path: spec/template/spec/volumes/projected/sources/configMap/name
+    kind: Deployment
+  - path: spec/template/spec/volumes/configMap/name
+    kind: ReplicaSet
+  - path: spec/template/spec/containers/env/valueFrom/configMapKeyRef/name
+    kind: ReplicaSet
+  - path: spec/template/spec/initContainers/env/valueFrom/configMapKeyRef/name
+    kind: ReplicaSet
+  - path: spec/template/spec/containers/envFrom/configMapRef/name
+    kind: ReplicaSet
+  - path: spec/template/spec/initContainers/envFrom/configMapRef/name
+    kind: ReplicaSet
+  - path: spec/template/spec/volumes/configMap/name
+    kind: DaemonSet
+  - path: spec/template/spec/containers/env/valueFrom/configMapKeyRef/name
+    kind: DaemonSet
+  - path: spec/template/spec/initContainers/env/valueFrom/configMapKeyRef/name
+    kind: DaemonSet
+  - path: spec/template/spec/containers/envFrom/configMapRef/name
+    kind: DaemonSet
+  - path: spec/template/spec/initContainers/envFrom/configMapRef/name
+    kind: DaemonSet
+  - path: spec/template/spec/volumes/configMap/name
+    kind: StatefulSet
+  - path: spec/template/spec/containers/env/valueFrom/configMapKeyRef/name
+    kind: StatefulSet
+  - path: spec/template/spec/initContainers/env/valueFrom/configMapKeyRef/name
+    kind: StatefulSet
+  - path: spec/template/spec/containers/envFrom/configMapRef/name
+    kind: StatefulSet
+  - path: spec/template/spec/initContainers/envFrom/configMapRef/name
+    kind: StatefulSet
+  - path: spec/template/spec/volumes/projected/sources/configMap/name
+    kind: StatefulSet
+  - path: spec/template/spec/volumes/configMap/name
+    kind: Job
+  - path: spec/template/spec/containers/env/valueFrom/configMapKeyRef/name
+    kind: Job
+  - path: spec/template/spec/initContainers/env/valueFrom/configMapKeyRef/name
+    kind: Job
+  - path: spec/template/spec/containers/envFrom/configMapRef/name
+    kind: Job
+  - path: spec/template/spec/initContainers/envFrom/configMapRef/name
+    kind: Job
+  - path: spec/jobTemplate/spec/template/spec/volumes/configMap/name
+    kind: CronJob
+  - path: spec/jobTemplate/spec/template/spec/containers/env/valueFrom/configMapKeyRef/name
+    kind: CronJob
+  - path: spec/jobTemplate/spec/template/spec/initContainers/env/valueFrom/configMapKeyRef/name
+    kind: CronJob
+  - path: spec/jobTemplate/spec/template/spec/containers/envFrom/configMapRef/name
+    kind: CronJob
+  - path: spec/jobTemplate/spec/template/spec/initContainers/envFrom/configMapRef/name
+    kind: CronJob
+
+- kind: Secret
+  version: v1
+  fieldSpecs:
+  - path: spec/volumes/secret/secretName
+    version: v1
+    kind: Pod
+  - path: spec/containers/env/valueFrom/secretKeyRef/name
+    version: v1
+    kind: Pod
+  - path: spec/initContainers/env/valueFrom/secretKeyRef/name
+    version: v1
+    kind: Pod
+  - path: spec/containers/envFrom/secretRef/name
+    version: v1
+    kind: Pod
+  - path: spec/initContainers/envFrom/secretRef/name
+    version: v1
+    kind: Pod
+  - path: spec/imagePullSecrets/name
+    version: v1
+    kind: Pod
+  - path: spec/template/spec/volumes/secret/secretName
+    kind: Deployment
+  - path: spec/template/spec/containers/env/valueFrom/secretKeyRef/name
+    kind: Deployment
+  - path: spec/template/spec/initContainers/env/valueFrom/secretKeyRef/name
+    kind: Deployment
+  - path: spec/template/spec/containers/envFrom/secretRef/name
+    kind: Deployment
+  - path: spec/template/spec/initContainers/envFrom/secretRef/name
+    kind: Deployment
+  - path: spec/template/spec/imagePullSecrets/name
+    kind: Deployment
+  - path: spec/template/spec/volumes/projected/sources/secret/name
+    kind: Deployment
+  - path: spec/template/spec/volumes/secret/secretName
+    kind: ReplicaSet
+  - path: spec/template/spec/containers/env/valueFrom/secretKeyRef/name
+    kind: ReplicaSet
+  - path: spec/template/spec/initContainers/env/valueFrom/secretKeyRef/name
+    kind: ReplicaSet
+  - path: spec/template/spec/containers/envFrom/secretRef/name
+    kind: ReplicaSet
+  - path: spec/template/spec/initContainers/envFrom/secretRef/name
+    kind: ReplicaSet
+  - path: spec/template/spec/imagePullSecrets/name
+    kind: ReplicaSet
+  - path: spec/template/spec/volumes/secret/secretName
+    kind: DaemonSet
+  - path: spec/template/spec/containers/env/valueFrom/secretKeyRef/name
+    kind: DaemonSet
+  - path: spec/template/spec/initContainers/env/valueFrom/secretKeyRef/name
+    kind: DaemonSet
+  - path: spec/template/spec/containers/envFrom/secretRef/name
+    kind: DaemonSet
+  - path: spec/template/spec/initContainers/envFrom/secretRef/name
+    kind: DaemonSet
+  - path: spec/template/spec/imagePullSecrets/name
+    kind: DaemonSet
+  - path: spec/template/spec/volumes/secret/secretName
+    kind: StatefulSet
+  - path: spec/template/spec/containers/env/valueFrom/secretKeyRef/name
+    kind: StatefulSet
+  - path: spec/template/spec/initContainers/env/valueFrom/secretKeyRef/name
+    kind: StatefulSet
+  - path: spec/template/spec/containers/envFrom/secretRef/name
+    kind: StatefulSet
+  - path: spec/template/spec/initContainers/envFrom/secretRef/name
+    kind: StatefulSet
+  - path: spec/template/spec/imagePullSecrets/name
+    kind: StatefulSet
+  - path: spec/template/spec/volumes/projected/sources/secret/name
+    kind: StatefulSet
+  - path: spec/template/spec/volumes/secret/secretName
+    kind: Job
+  - path: spec/template/spec/containers/env/valueFrom/secretKeyRef/name
+    kind: Job
+  - path: spec/template/spec/initContainers/env/valueFrom/secretKeyRef/name
+    kind: Job
+  - path: spec/template/spec/containers/envFrom/secretRef/name
+    kind: Job
+  - path: spec/template/spec/initContainers/envFrom/secretRef/name
+    kind: Job
+  - path: spec/template/spec/imagePullSecrets/name
+    kind: Job
+  - path: spec/jobTemplate/spec/template/spec/volumes/secret/secretName
+    kind: CronJob
+  - path: spec/jobTemplate/spec/template/spec/containers/env/valueFrom/secretKeyRef/name
+    kind: CronJob
+  - path: spec/jobTemplate/spec/template/spec/initContainers/env/valueFrom/secretKeyRef/name
+    kind: CronJob
+  - path: spec/jobTemplate/spec/template/spec/containers/envFrom/secretRef/name
+    kind: CronJob
+  - path: spec/jobTemplate/spec/template/spec/initContainers/envFrom/secretRef/name
+    kind: CronJob
+  - path: spec/jobTemplate/spec/template/spec/imagePullSecrets/name
+    kind: CronJob
+  - path: spec/tls/secretName
+    kind: Ingress
+  - path: metadata/annotations/ingress.kubernetes.io\/auth-secret
+    kind: Ingress
+  - path: metadata/annotations/nginx.ingress.kubernetes.io\/auth-secret
+    kind: Ingress
+  - path: imagePullSecrets/name
+    kind: ServiceAccount
+  - path: parameters/secretName
+    kind: StorageClass
+  - path: parameters/adminSecretName
+    kind: StorageClass
+  - path: parameters/userSecretName
+    kind: StorageClass
+  - path: parameters/secretRef
+    kind: StorageClass
+  - path: rules/resourceNames
+    kind: Role
+  - path: rules/resourceNames
+    kind: ClusterRole
+
+- kind: Service
+  version: v1
+  fieldSpecs:
+  - path: spec/serviceName
+    kind: StatefulSet
+    group: apps
+  - path: spec/rules/http/paths/backend/serviceName
+    kind: Ingress
+  - path: spec/backend/serviceName
+    kind: Ingress
+  - path: spec/service/name
+    kind: APIService
+    group: apiregistration.k8s.io
+
+- kind: Role
+  group: rbac.authorization.k8s.io
+  fieldSpecs:
+  - path: roleRef/name
+    kind: RoleBinding
+    group: rbac.authorization.k8s.io
+
+- kind: ClusterRole
+  group: rbac.authorization.k8s.io
+  fieldSpecs:
+  - path: roleRef/name
+    kind: RoleBinding
+    group: rbac.authorization.k8s.io
+  - path: roleRef/name
+    kind: ClusterRoleBinding
+    group: rbac.authorization.k8s.io
+
+- kind: ServiceAccount
+  version: v1
+  fieldSpecs:
+  - path: subjects/name
+    kind: RoleBinding
+    group: rbac.authorization.k8s.io
+  - path: subjects/name
+    kind: ClusterRoleBinding
+    group: rbac.authorization.k8s.io
+  - path: spec/serviceAccountName
+    kind: Pod
+  - path: spec/template/spec/serviceAccountName
+    kind: StatefulSet
+  - path: spec/template/spec/serviceAccountName
+    kind: Deployment
+  - path: spec/template/spec/serviceAccountName
+    kind: ReplicationController
+  - path: spec/jobTemplate/spec/template/spec/serviceAccountName
+    kind: CronJob
+  - path: spec/template/spec/serviceAccountName
+    kind: Job
+  - path: spec/template/spec/serviceAccountName
+    kind: DaemonSet
+
+- kind: PersistentVolumeClaim
+  version: v1
+  fieldSpecs:
+  - path: spec/volumes/persistentVolumeClaim/claimName
+    kind: Pod
+  - path: spec/template/spec/volumes/persistentVolumeClaim/claimName
+    kind: StatefulSet
+  - path: spec/template/spec/volumes/persistentVolumeClaim/claimName
+    kind: Deployment
+  - path: spec/template/spec/volumes/persistentVolumeClaim/claimName
+    kind: ReplicationController
+  - path: spec/jobTemplate/spec/template/spec/volumes/persistentVolumeClaim/claimName
+    kind: CronJob
+  - path: spec/template/spec/volumes/persistentVolumeClaim/claimName
+    kind: Job
+  - path: spec/template/spec/volumes/persistentVolumeClaim/claimName
+    kind: DaemonSet
+
+- kind: PersistentVolume
+  version: v1
+  fieldSpecs:
+  - path: spec/volumeName
+    kind: PersistentVolumeClaim
+`
+)
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/transformers/config/defaultconfig/namespace.go b/vendor/sigs.k8s.io/kustomize/pkg/transformers/config/defaultconfig/namespace.go
new file mode 100644
index 00000000..431eb076
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/transformers/config/defaultconfig/namespace.go
@@ -0,0 +1,25 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package defaultconfig
+
+const (
+	namespaceFieldSpecs = `
+namespace:
+- path: metadata/namespace
+  create: true
+`
+)
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/transformers/config/defaultconfig/varreference.go b/vendor/sigs.k8s.io/kustomize/pkg/transformers/config/defaultconfig/varreference.go
new file mode 100644
index 00000000..71953f57
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/transformers/config/defaultconfig/varreference.go
@@ -0,0 +1,162 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package defaultconfig
+
+const (
+	varReferenceFieldSpecs = `
+varReference:
+- path: spec/template/spec/initContainers/command
+  kind: StatefulSet
+
+- path: spec/template/spec/containers/command
+  kind: StatefulSet
+
+- path: spec/template/spec/initContainers/command
+  kind: Deployment
+
+- path: spec/template/spec/containers/command
+  kind: Deployment
+
+- path: spec/template/spec/initContainers/command
+  kind: DaemonSet
+
+- path: spec/template/spec/containers/command
+  kind: DaemonSet
+
+- path: spec/template/spec/containers/command
+  kind: Job
+
+- path: spec/jobTemplate/spec/template/spec/containers/command
+  kind: CronJob
+
+- path: spec/template/spec/initContainers/args
+  kind: StatefulSet
+
+- path: spec/template/spec/containers/args
+  kind: StatefulSet
+
+- path: spec/template/spec/initContainers/args
+  kind: Deployment
+
+- path: spec/template/spec/containers/args
+  kind: Deployment
+
+- path: spec/template/spec/initContainers/args
+  kind: DaemonSet
+
+- path: spec/template/spec/containers/args
+  kind: DaemonSet
+
+- path: spec/template/spec/containers/args
+  kind: Job
+
+- path: spec/jobTemplate/spec/template/spec/containers/args
+  kind: CronJob
+
+- path: spec/template/spec/initContainers/env/value
+  kind: StatefulSet
+
+- path: spec/template/spec/containers/env/value
+  kind: StatefulSet
+
+- path: spec/template/spec/initContainers/env/value
+  kind: Deployment
+
+- path: spec/template/spec/containers/env/value
+  kind: Deployment
+
+- path: spec/template/spec/initContainers/env/value
+  kind: DaemonSet
+
+- path: spec/template/spec/containers/env/value
+  kind: DaemonSet
+
+- path: spec/template/spec/containers/env/value
+  kind: Job
+
+- path: spec/jobTemplate/spec/template/spec/containers/env/value
+  kind: CronJob
+
+- path: spec/containers/command
+  kind: Pod
+
+- path: spec/containers/args
+  kind: Pod
+
+- path: spec/containers/env/value
+  kind: Pod
+
+- path: spec/initContainers/command
+  kind: Pod
+
+- path: spec/initContainers/args
+  kind: Pod
+
+- path: spec/initContainers/env/value
+  kind: Pod
+
+- path: spec/rules/host
+  kind: Ingress
+
+- path: spec/tls/hosts
+  kind: Ingress
+
+- path: spec/template/spec/containers/volumeMounts/mountPath
+  kind: StatefulSet
+
+- path: spec/template/spec/initContainers/volumeMounts/mountPath
+  kind: StatefulSet
+
+- path: spec/containers/volumeMounts/mountPath
+  kind: Pod
+
+- path: spec/initContainers/volumeMounts/mountPath
+  kind: Pod
+
+- path: spec/template/spec/containers/volumeMounts/mountPath
+  kind: ReplicaSet
+
+- path: spec/template/spec/initContainers/volumeMounts/mountPath
+  kind: ReplicaSet
+
+- path: spec/template/spec/containers/volumeMounts/mountPath
+  kind: Job
+
+- path: spec/template/spec/initContainers/volumeMounts/mountPath
+  kind: Job
+
+- path: spec/template/spec/containers/volumeMounts/mountPath
+  kind: CronJob
+
+- path: spec/template/spec/initContainers/volumeMounts/mountPath
+  kind: CronJob
+
+- path: spec/template/spec/containers/volumeMounts/mountPath
+  kind: DaemonSet
+
+- path: spec/template/spec/initContainers/volumeMounts/mountPath
+  kind: DaemonSet
+
+- path: spec/template/spec/containers/volumeMounts/mountPath
+  kind: Deployment
+
+- path: spec/template/spec/initContainers/volumeMounts/mountPath
+  kind: Deployment
+
+- path: metadata/labels
+`
+)
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/transformers/config/factory.go b/vendor/sigs.k8s.io/kustomize/pkg/transformers/config/factory.go
new file mode 100644
index 00000000..d0ea0d1d
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/transformers/config/factory.go
@@ -0,0 +1,87 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package config
+
+import (
+	"log"
+
+	"github.com/ghodss/yaml"
+	"sigs.k8s.io/kustomize/pkg/ifc"
+)
+
+// Factory makes instances of TransformerConfig.
+type Factory struct {
+	ldr ifc.Loader
+}
+
+// MakeTransformerConfig returns a merger of custom config,
+// if any, with default config.
+func MakeTransformerConfig(
+	ldr ifc.Loader, paths []string) (*TransformerConfig, error) {
+	t1 := MakeDefaultConfig()
+	if len(paths) == 0 {
+		return t1, nil
+	}
+	t2, err := NewFactory(ldr).FromFiles(paths)
+	if err != nil {
+		return nil, err
+	}
+	return t1.Merge(t2)
+}
+
+func NewFactory(l ifc.Loader) *Factory {
+	return &Factory{ldr: l}
+}
+
+func (tf *Factory) loader() ifc.Loader {
+	if tf.ldr == nil {
+		log.Fatal("no loader")
+	}
+	return tf.ldr
+}
+
+// FromFiles returns a TransformerConfig object from a list of files
+func (tf *Factory) FromFiles(
+	paths []string) (*TransformerConfig, error) {
+	result := &TransformerConfig{}
+	for _, path := range paths {
+		data, err := tf.loader().Load(path)
+		if err != nil {
+			return nil, err
+		}
+		t, err := makeTransformerConfigFromBytes(data)
+		if err != nil {
+			return nil, err
+		}
+		result, err = result.Merge(t)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return result, nil
+}
+
+// makeTransformerConfigFromBytes returns a TransformerConfig object from bytes
+func makeTransformerConfigFromBytes(data []byte) (*TransformerConfig, error) {
+	var t TransformerConfig
+	err := yaml.Unmarshal(data, &t)
+	if err != nil {
+		return nil, err
+	}
+	t.sortFields()
+	return &t, nil
+}
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/transformers/config/factorycrd.go b/vendor/sigs.k8s.io/kustomize/pkg/transformers/config/factorycrd.go
new file mode 100644
index 00000000..66a24dc8
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/transformers/config/factorycrd.go
@@ -0,0 +1,201 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package config
+
+import (
+	"encoding/json"
+	"strings"
+
+	"github.com/ghodss/yaml"
+	"github.com/go-openapi/spec"
+	"github.com/pkg/errors"
+	"k8s.io/kube-openapi/pkg/common"
+	"sigs.k8s.io/kustomize/pkg/gvk"
+	"sigs.k8s.io/kustomize/pkg/ifc"
+)
+
+type myProperties map[string]spec.Schema
+type nameToApiMap map[string]common.OpenAPIDefinition
+
+// LoadConfigFromCRDs parses CRD schemas from paths into a TransformerConfig
+func LoadConfigFromCRDs(
+	ldr ifc.Loader, paths []string) (*TransformerConfig, error) {
+	tc := MakeEmptyConfig()
+	for _, path := range paths {
+		content, err := ldr.Load(path)
+		if err != nil {
+			return nil, err
+		}
+		m, err := makeNameToApiMap(content)
+		if err != nil {
+			return nil, errors.Wrapf(err, "unable to parse open API definition from '%s'", path)
+		}
+		otherTc, err := makeConfigFromApiMap(m)
+		if err != nil {
+			return nil, err
+		}
+		tc, err = tc.Merge(otherTc)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return tc, nil
+}
+
+func makeNameToApiMap(content []byte) (result nameToApiMap, err error) {
+	if content[0] == '{' {
+		err = json.Unmarshal(content, &result)
+	} else {
+		err = yaml.Unmarshal(content, &result)
+	}
+	return
+}
+
+func makeConfigFromApiMap(m nameToApiMap) (*TransformerConfig, error) {
+	result := MakeEmptyConfig()
+	for name, api := range m {
+		if !looksLikeAk8sType(api.Schema.SchemaProps.Properties) {
+			continue
+		}
+		tc := MakeEmptyConfig()
+		err := loadCrdIntoConfig(
+			tc, makeGvkFromTypeName(name), m, name, []string{})
+		if err != nil {
+			return result, err
+		}
+		result, err = result.Merge(tc)
+		if err != nil {
+			return result, err
+		}
+	}
+	return result, nil
+}
+
+// TODO: Get Group and Version for CRD from the
+// openAPI definition once
+// "x-kubernetes-group-version-kind" is available in CRD
+func makeGvkFromTypeName(n string) gvk.Gvk {
+	names := strings.Split(n, ".")
+	kind := names[len(names)-1]
+	return gvk.Gvk{Kind: kind}
+}
+
+func looksLikeAk8sType(properties myProperties) bool {
+	_, ok := properties["kind"]
+	if !ok {
+		return false
+	}
+	_, ok = properties["apiVersion"]
+	if !ok {
+		return false
+	}
+	_, ok = properties["metadata"]
+	if !ok {
+		return false
+	}
+	return true
+}
+
+const (
+	// "x-kubernetes-annotation": ""
+	xAnnotation = "x-kubernetes-annotation"
+
+	// "x-kubernetes-label-selector": ""
+	xLabelSelector = "x-kubernetes-label-selector"
+
+	// "x-kubernetes-identity": ""
+	xIdentity = "x-kubernetes-identity"
+
+	// "x-kubernetes-object-ref-api-version": <apiVersion name>
+	xVersion = "x-kubernetes-object-ref-api-version"
+
+	// "x-kubernetes-object-ref-kind": <kind name>
+	xKind = "x-kubernetes-object-ref-kind"
+
+	// "x-kubernetes-object-ref-name-key": "name"
+	// default is "name"
+	xNameKey = "x-kubernetes-object-ref-name-key"
+)
+
+// loadCrdIntoConfig loads a CRD spec into a TransformerConfig
+func loadCrdIntoConfig(
+	theConfig *TransformerConfig, theGvk gvk.Gvk, theMap nameToApiMap,
+	typeName string, path []string) (err error) {
+	api, ok := theMap[typeName]
+	if !ok {
+		return nil
+	}
+	for propName, property := range api.Schema.SchemaProps.Properties {
+		_, annotate := property.Extensions.GetString(xAnnotation)
+		if annotate {
+			err = theConfig.AddAnnotationFieldSpec(
+				makeFs(theGvk, append(path, propName)))
+			if err != nil {
+				return
+			}
+		}
+		_, label := property.Extensions.GetString(xLabelSelector)
+		if label {
+			err = theConfig.AddLabelFieldSpec(
+				makeFs(theGvk, append(path, propName)))
+			if err != nil {
+				return
+			}
+		}
+		_, identity := property.Extensions.GetString(xIdentity)
+		if identity {
+			err = theConfig.AddPrefixFieldSpec(
+				makeFs(theGvk, append(path, propName)))
+			if err != nil {
+				return
+			}
+		}
+		version, ok := property.Extensions.GetString(xVersion)
+		if ok {
+			kind, ok := property.Extensions.GetString(xKind)
+			if ok {
+				nameKey, ok := property.Extensions.GetString(xNameKey)
+				if !ok {
+					nameKey = "name"
+				}
+				err = theConfig.AddNamereferenceFieldSpec(
+					NameBackReferences{
+						Gvk: gvk.Gvk{Kind: kind, Version: version},
+						FieldSpecs: []FieldSpec{
+							makeFs(theGvk, append(path, propName, nameKey))},
+					})
+				if err != nil {
+					return
+				}
+			}
+		}
+		if property.Ref.GetURL() != nil {
+			loadCrdIntoConfig(
+				theConfig, theGvk, theMap,
+				property.Ref.String(), append(path, propName))
+		}
+	}
+	return nil
+}
+
+func makeFs(in gvk.Gvk, path []string) FieldSpec {
+	return FieldSpec{
+		CreateIfNotPresent: false,
+		Gvk:                in,
+		Path:               strings.Join(path, "/"),
+	}
+}
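+
+// Illustrative note (not part of the upstream source): a CRD whose openAPI
+// definition marks a property with the object-ref extensions, e.g.
+//
+//   com.example.v1.MyKind:
+//     ...
+//     properties:
+//       secretRef:
+//         x-kubernetes-object-ref-api-version: v1
+//         x-kubernetes-object-ref-kind: Secret
+//
+// causes loadCrdIntoConfig to register a nameReference from v1 Secret to
+// MyKind's secretRef/name path, so renaming a generated Secret also
+// updates references to it inside MyKind objects.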
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/transformers/config/fieldspec.go b/vendor/sigs.k8s.io/kustomize/pkg/transformers/config/fieldspec.go
new file mode 100644
index 00000000..5b0f6ee3
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/transformers/config/fieldspec.go
@@ -0,0 +1,139 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package config
+
+import (
+	"fmt"
+	"strings"
+
+	"sigs.k8s.io/kustomize/pkg/gvk"
+)
+
+// FieldSpec completely specifies a kustomizable field in
+// an unstructured representation of a k8s API object.
+// It helps define the operands of transformations.
+//
+// For example, a directive to add a common label to objects
+// will need to know that a 'Deployment' object (in API group
+// 'apps', any version) can have labels at field path
+// 'spec/template/metadata/labels', and further that it is OK
+// (or not OK) to add that field path to the object if the
+// field path doesn't exist already.
+//
+// This would look like
+// {
+//   group: apps
+//   kind: Deployment
+//   path: spec/template/metadata/labels
+//   create: true
+// }
+type FieldSpec struct {
+	gvk.Gvk            `json:",inline,omitempty" yaml:",inline,omitempty"`
+	Path               string `json:"path,omitempty" yaml:"path,omitempty"`
+	CreateIfNotPresent bool   `json:"create,omitempty" yaml:"create,omitempty"`
+}
+
+const (
+	escapedForwardSlash  = "\\/"
+	tempSlashReplacement = "???"
+)
+
+func (fs FieldSpec) String() string {
+	return fmt.Sprintf(
+		"%s:%v:%s", fs.Gvk.String(), fs.CreateIfNotPresent, fs.Path)
+}
+
+// effectivelyEquals is true when the primary key (Gvk and Path) is the
+// same, though other fields might not be.
+func (fs FieldSpec) effectivelyEquals(other FieldSpec) bool {
+	return fs.IsSelected(&other.Gvk) && fs.Path == other.Path
+}
+
+// PathSlice converts the path string to a slice of strings,
+// separated by a '/'. Forward slash can be contained in a
+// fieldname, such as ingress.kubernetes.io/auth-secret in
+// Ingress annotations. To deal with this special case, the
+// path to this field should be formatted as
+//
+//   metadata/annotations/ingress.kubernetes.io\/auth-secret
+//
+// Then PathSlice will return
+//
+//   []string{
+//      "metadata",
+//      "annotations",
+//      "ingress.auth-secretkubernetes.io/auth-secret"
+//   }
+func (fs FieldSpec) PathSlice() []string {
+	if !strings.Contains(fs.Path, escapedForwardSlash) {
+		return strings.Split(fs.Path, "/")
+	}
+	s := strings.Replace(fs.Path, escapedForwardSlash, tempSlashReplacement, -1)
+	paths := strings.Split(s, "/")
+	var result []string
+	for _, path := range paths {
+		result = append(result, strings.Replace(path, tempSlashReplacement, "/", -1))
+	}
+	return result
+}
+
+type fsSlice []FieldSpec
+
+func (s fsSlice) Len() int      { return len(s) }
+func (s fsSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s fsSlice) Less(i, j int) bool {
+	return s[i].Gvk.IsLessThan(s[j].Gvk)
+}
+
+// mergeAll merges the argument into this, returning the result.
+// Items already present are ignored.
+// Items that conflict (primary key matches, but remaining data differs)
+// result in an error.
+func (s fsSlice) mergeAll(incoming fsSlice) (result fsSlice, err error) {
+	result = s
+	for _, x := range incoming {
+		result, err = result.mergeOne(x)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return result, nil
+}
+
+// mergeOne merges the argument into this, returning the result.
+// If the item's primary key is already present, and there are no
+// conflicts, it is ignored (we don't want duplicates).
+// If there is a conflict, the merge fails.
+func (s fsSlice) mergeOne(x FieldSpec) (fsSlice, error) {
+	i := s.index(x)
+	if i > -1 {
+		// It's already there.
+		if s[i].CreateIfNotPresent != x.CreateIfNotPresent {
+			return nil, fmt.Errorf("conflicting fieldspecs")
+		}
+		return s, nil
+	}
+	return append(s, x), nil
+}
+
+func (s fsSlice) index(fs FieldSpec) int {
+	for i, x := range s {
+		if x.effectivelyEquals(fs) {
+			return i
+		}
+	}
+	return -1
+}
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/transformers/config/namebackreferences.go b/vendor/sigs.k8s.io/kustomize/pkg/transformers/config/namebackreferences.go
new file mode 100644
index 00000000..172e4b3c
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/transformers/config/namebackreferences.go
@@ -0,0 +1,105 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package config
+
+import (
+	"strings"
+
+	"sigs.k8s.io/kustomize/pkg/gvk"
+)
+
+// NameBackReferences is an association between a gvk.Gvk and a list
+// of FieldSpec instances that could refer to it.
+//
+// It is used to handle name changes, and can be thought of as a
+// contact list.  If you change your own contact info (name,
+// phone number, etc.), you must tell your contacts or they won't
+// know about the change.
+//
+// For example, ConfigMaps can be used by Pods and everything that
+// contains a Pod; Deployment, Job, StatefulSet, etc.  To change
+// the name of a ConfigMap instance from 'alice' to 'bob', one
+// must visit all objects that could refer to the ConfigMap, see if
+// they mention 'alice', and if so, change the reference to 'bob'.
+//
+// The NameBackReferences instance to aid in this could look like
+//   {
+//     kind: ConfigMap
+//     version: v1
+//     FieldSpecs:
+//     - kind: Pod
+//       version: v1
+//       path: spec/volumes/configMap/name
+//     - kind: Deployment
+//       path: spec/template/spec/volumes/configMap/name
+//     - kind: Job
+//       path: spec/template/spec/volumes/configMap/name
+//       (etc.)
+//   }
+type NameBackReferences struct {
+	gvk.Gvk    `json:",inline,omitempty" yaml:",inline,omitempty"`
+	FieldSpecs fsSlice `json:"FieldSpecs,omitempty" yaml:"FieldSpecs,omitempty"`
+}
+
+func (n NameBackReferences) String() string {
+	var r []string
+	for _, f := range n.FieldSpecs {
+		r = append(r, f.String())
+	}
+	return n.Gvk.String() + ":  (\n" +
+		strings.Join(r, "\n") + "\n)"
+}
+
+type nbrSlice []NameBackReferences
+
+func (s nbrSlice) Len() int      { return len(s) }
+func (s nbrSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s nbrSlice) Less(i, j int) bool {
+	return s[i].Gvk.IsLessThan(s[j].Gvk)
+}
+
+func (s nbrSlice) mergeAll(o nbrSlice) (result nbrSlice, err error) {
+	result = s
+	for _, r := range o {
+		result, err = result.mergeOne(r)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return result, nil
+}
+
+func (s nbrSlice) mergeOne(other NameBackReferences) (nbrSlice, error) {
+	var result nbrSlice
+	var err error
+	found := false
+	for _, c := range s {
+		if c.Gvk.Equals(other.Gvk) {
+			c.FieldSpecs, err = c.FieldSpecs.mergeAll(other.FieldSpecs)
+			if err != nil {
+				return nil, err
+			}
+			found = true
+		}
+		result = append(result, c)
+	}
+
+	if !found {
+		result = append(result, other)
+	}
+	return result, nil
+}
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/transformers/config/transformerconfig.go b/vendor/sigs.k8s.io/kustomize/pkg/transformers/config/transformerconfig.go
new file mode 100644
index 00000000..556f0b81
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/transformers/config/transformerconfig.go
@@ -0,0 +1,134 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package config provides the functions to load default or user provided configurations
+// for different transformers
+package config
+
+import (
+	"log"
+	"sort"
+
+	"sigs.k8s.io/kustomize/pkg/transformers/config/defaultconfig"
+)
+
+// TransformerConfig holds the data needed to perform transformations.
+type TransformerConfig struct {
+	NamePrefix        fsSlice  `json:"namePrefix,omitempty" yaml:"namePrefix,omitempty"`
+	NameSuffix        fsSlice  `json:"nameSuffix,omitempty" yaml:"nameSuffix,omitempty"`
+	NameSpace         fsSlice  `json:"namespace,omitempty" yaml:"namespace,omitempty"`
+	CommonLabels      fsSlice  `json:"commonLabels,omitempty" yaml:"commonLabels,omitempty"`
+	CommonAnnotations fsSlice  `json:"commonAnnotations,omitempty" yaml:"commonAnnotations,omitempty"`
+	NameReference     nbrSlice `json:"nameReference,omitempty" yaml:"nameReference,omitempty"`
+	VarReference      fsSlice  `json:"varReference,omitempty" yaml:"varReference,omitempty"`
+}
+
+// MakeEmptyConfig returns an empty TransformerConfig object
+func MakeEmptyConfig() *TransformerConfig {
+	return &TransformerConfig{}
+}
+
+// MakeDefaultConfig returns a default TransformerConfig.
+func MakeDefaultConfig() *TransformerConfig {
+	c, err := makeTransformerConfigFromBytes(
+		defaultconfig.GetDefaultFieldSpecs())
+	if err != nil {
+		log.Fatalf("Unable to make default transformconfig: %v", err)
+	}
+	return c
+}
+
+// sortFields provides determinism in logging, tests, etc.
+func (t *TransformerConfig) sortFields() {
+	sort.Sort(t.NamePrefix)
+	sort.Sort(t.NameSpace)
+	sort.Sort(t.CommonLabels)
+	sort.Sort(t.CommonAnnotations)
+	sort.Sort(t.NameReference)
+	sort.Sort(t.VarReference)
+}
+
+// AddPrefixFieldSpec adds a FieldSpec to NamePrefix
+func (t *TransformerConfig) AddPrefixFieldSpec(fs FieldSpec) (err error) {
+	t.NamePrefix, err = t.NamePrefix.mergeOne(fs)
+	return err
+}
+
+// AddSuffixFieldSpec adds a FieldSpec to NameSuffix
+func (t *TransformerConfig) AddSuffixFieldSpec(fs FieldSpec) (err error) {
+	t.NameSuffix, err = t.NameSuffix.mergeOne(fs)
+	return err
+}
+
+// AddLabelFieldSpec adds a FieldSpec to CommonLabels
+func (t *TransformerConfig) AddLabelFieldSpec(fs FieldSpec) (err error) {
+	t.CommonLabels, err = t.CommonLabels.mergeOne(fs)
+	return err
+}
+
+// AddAnnotationFieldSpec adds a FieldSpec to CommonAnnotations
+func (t *TransformerConfig) AddAnnotationFieldSpec(fs FieldSpec) (err error) {
+	t.CommonAnnotations, err = t.CommonAnnotations.mergeOne(fs)
+	return err
+}
+
+// AddNamereferenceFieldSpec adds a NameBackReferences to NameReference
+func (t *TransformerConfig) AddNamereferenceFieldSpec(
+	nbrs NameBackReferences) (err error) {
+	t.NameReference, err = t.NameReference.mergeOne(nbrs)
+	return err
+}
+
+// Merge merges two TransformerConfig objects into
+// a new TransformerConfig object
+func (t *TransformerConfig) Merge(input *TransformerConfig) (
+	merged *TransformerConfig, err error) {
+	if input == nil {
+		return t, nil
+	}
+	merged = &TransformerConfig{}
+	merged.NamePrefix, err = t.NamePrefix.mergeAll(input.NamePrefix)
+	if err != nil {
+		return nil, err
+	}
+	merged.NameSuffix, err = t.NameSuffix.mergeAll(input.NameSuffix)
+	if err != nil {
+		return nil, err
+	}
+	merged.NameSpace, err = t.NameSpace.mergeAll(input.NameSpace)
+	if err != nil {
+		return nil, err
+	}
+	merged.CommonAnnotations, err = t.CommonAnnotations.mergeAll(
+		input.CommonAnnotations)
+	if err != nil {
+		return nil, err
+	}
+	merged.CommonLabels, err = t.CommonLabels.mergeAll(input.CommonLabels)
+	if err != nil {
+		return nil, err
+	}
+	merged.VarReference, err = t.VarReference.mergeAll(input.VarReference)
+	if err != nil {
+		return nil, err
+	}
+	merged.NameReference, err = t.NameReference.mergeAll(input.NameReference)
+	if err != nil {
+		return nil, err
+	}
+	merged.sortFields()
+	return merged, nil
+}
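As a hedged sketch of how a caller might extend the defaults (the CRD kind and path below are hypothetical, not from the patch):

package main

import (
	"fmt"

	"sigs.k8s.io/kustomize/pkg/gvk"
	"sigs.k8s.io/kustomize/pkg/transformers/config"
)

func main() {
	// Hypothetical extra place where commonLabels should be applied.
	extra := config.MakeEmptyConfig()
	if err := extra.AddLabelFieldSpec(config.FieldSpec{
		Gvk:  gvk.Gvk{Group: "example.com", Kind: "MyCrd"},
		Path: "spec/selector/labels",
	}); err != nil {
		panic(err)
	}
	// Merge rejects conflicting entries and sorts the result.
	merged, err := config.MakeDefaultConfig().Merge(extra)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(merged.CommonLabels))
}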
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/transformers/image.go b/vendor/sigs.k8s.io/kustomize/pkg/transformers/image.go
new file mode 100644
index 00000000..2e079769
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/transformers/image.go
@@ -0,0 +1,171 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package transformers
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+
+	"sigs.k8s.io/kustomize/pkg/image"
+	"sigs.k8s.io/kustomize/pkg/resmap"
+)
+
+// imageTransformer replaces image names and tags
+type imageTransformer struct {
+	images []image.Image
+}
+
+var _ Transformer = &imageTransformer{}
+
+// NewImageTransformer constructs an imageTransformer.
+func NewImageTransformer(slice []image.Image) (Transformer, error) {
+	return &imageTransformer{slice}, nil
+}
+
+// Transform finds the matching images and replaces name, tag and/or digest
+func (pt *imageTransformer) Transform(resources resmap.ResMap) error {
+	if len(pt.images) == 0 {
+		return nil
+	}
+	for _, res := range resources {
+		err := pt.findAndReplaceImage(res.Map())
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+/*
+ findAndReplaceImage replaces the image name and tags inside one object
+ It searches the object for the containers section,
+ then loops through all images inside that section,
+ finds matched ones and updates the image name and tag.
+*/
+func (pt *imageTransformer) findAndReplaceImage(obj map[string]interface{}) error {
+	paths := []string{"containers", "initContainers"}
+	found := false
+	for _, path := range paths {
+		_, found = obj[path]
+		if found {
+			err := pt.updateContainers(obj, path)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	if !found {
+		return pt.findContainers(obj)
+	}
+	return nil
+}
+
+func (pt *imageTransformer) updateContainers(obj map[string]interface{}, path string) error {
+	containers, ok := obj[path].([]interface{})
+	if !ok {
+		return fmt.Errorf("containers path is not of type []interface{} but %T", obj[path])
+	}
+	for i := range containers {
+		container := containers[i].(map[string]interface{})
+		containerImage, found := container["image"]
+		if !found {
+			continue
+		}
+
+		imageName := containerImage.(string)
+		for _, img := range pt.images {
+			if !isImageMatched(imageName, img.Name) {
+				continue
+			}
+			name, tag := split(imageName)
+			if img.NewName != "" {
+				name = img.NewName
+			}
+			if img.NewTag != "" {
+				tag = ":" + img.NewTag
+			}
+			if img.Digest != "" {
+				tag = "@" + img.Digest
+			}
+			container["image"] = name + tag
+			break
+		}
+	}
+	return nil
+}
+
+func (pt *imageTransformer) findContainers(obj map[string]interface{}) error {
+	for key := range obj {
+		switch typedV := obj[key].(type) {
+		case map[string]interface{}:
+			err := pt.findAndReplaceImage(typedV)
+			if err != nil {
+				return err
+			}
+		case []interface{}:
+			for i := range typedV {
+				item := typedV[i]
+				typedItem, ok := item.(map[string]interface{})
+				if ok {
+					err := pt.findAndReplaceImage(typedItem)
+					if err != nil {
+						return err
+					}
+				}
+			}
+		}
+	}
+	return nil
+}
+
+func isImageMatched(s, t string) bool {
+	// Tag values are limited to [a-zA-Z0-9_.-].
+	pattern, _ := regexp.Compile("^" + t + "(:[a-zA-Z0-9_.-]*)?$")
+	return pattern.MatchString(s)
+}
+
+// split separates and returns the name and tag parts
+// from the image string using either colon `:` or at `@` separators.
+// Note that the returned tag keeps its separator.
+func split(imageName string) (name string, tag string) {
+	// check if image name contains a domain
+	// if domain is present, ignore domain and check for `:`
+	ic := -1
+	if slashIndex := strings.Index(imageName, "/"); slashIndex < 0 {
+		ic = strings.LastIndex(imageName, ":")
+	} else {
+		lastIc := strings.LastIndex(imageName[slashIndex:], ":")
+		// set ic only if `:` is present
+		if lastIc > 0 {
+			ic = slashIndex + lastIc
+		}
+	}
+	ia := strings.LastIndex(imageName, "@")
+	if ic < 0 && ia < 0 {
+		return imageName, ""
+	}
+
+	i := ic
+	if ic < 0 {
+		i = ia
+	}
+
+	name = imageName[:i]
+	tag = imageName[i:]
+	return
+}
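Since split and isImageMatched are unexported, here is an in-package sketch (illustrative only, not from the patch) of what split returns; note the returned tag keeps its ':' separator:

package transformers

import "fmt"

// sketchSplit prints what split returns for a few common image strings.
func sketchSplit() {
	for _, s := range []string{
		"nginx:1.15",                   // -> "nginx" ":1.15"
		"docker.io/library/nginx:1.15", // -> "docker.io/library/nginx" ":1.15"
		"quay.io/org/app",              // -> "quay.io/org/app" ""
	} {
		name, tag := split(s)
		fmt.Printf("%s -> %q %q\n", s, name, tag)
	}
}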
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/transformers/labelsandannotations.go b/vendor/sigs.k8s.io/kustomize/pkg/transformers/labelsandannotations.go
new file mode 100644
index 00000000..836abcaa
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/transformers/labelsandannotations.go
@@ -0,0 +1,86 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package transformers
+
+import (
+	"errors"
+	"fmt"
+
+	"sigs.k8s.io/kustomize/pkg/resmap"
+	"sigs.k8s.io/kustomize/pkg/transformers/config"
+)
+
+// mapTransformer applies a string->string map to fieldSpecs.
+type mapTransformer struct {
+	m          map[string]string
+	fieldSpecs []config.FieldSpec
+}
+
+var _ Transformer = &mapTransformer{}
+
+// NewLabelsMapTransformer constructs a mapTransformer.
+func NewLabelsMapTransformer(
+	m map[string]string, fs []config.FieldSpec) (Transformer, error) {
+	return NewMapTransformer(fs, m)
+}
+
+// NewAnnotationsMapTransformer constructs a mapTransformer.
+func NewAnnotationsMapTransformer(
+	m map[string]string, fs []config.FieldSpec) (Transformer, error) {
+	return NewMapTransformer(fs, m)
+}
+
+// NewMapTransformer constructs a mapTransformer.
+func NewMapTransformer(
+	pc []config.FieldSpec, m map[string]string) (Transformer, error) {
+	if m == nil {
+		return NewNoOpTransformer(), nil
+	}
+	if pc == nil {
+		return nil, errors.New("fieldSpecs is not expected to be nil")
+	}
+	return &mapTransformer{fieldSpecs: pc, m: m}, nil
+}
+
+// Transform applies each <key, value> pair in the mapTransformer to the
+// fields specified in the mapTransformer.
+func (o *mapTransformer) Transform(m resmap.ResMap) error {
+	for id := range m {
+		objMap := m[id].Map()
+		for _, path := range o.fieldSpecs {
+			if !id.Gvk().IsSelected(&path.Gvk) {
+				continue
+			}
+			err := mutateField(objMap, path.PathSlice(), path.CreateIfNotPresent, o.addMap)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func (o *mapTransformer) addMap(in interface{}) (interface{}, error) {
+	m, ok := in.(map[string]interface{})
+	if !ok {
+		return nil, fmt.Errorf("%#v is expected to be %T", in, m)
+	}
+	for k, v := range o.m {
+		m[k] = v
+	}
+	return m, nil
+}
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/transformers/multitransformer.go b/vendor/sigs.k8s.io/kustomize/pkg/transformers/multitransformer.go
new file mode 100644
index 00000000..d5921d1a
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/transformers/multitransformer.go
@@ -0,0 +1,95 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package transformers
+
+import (
+	"fmt"
+	"sigs.k8s.io/kustomize/pkg/resource"
+
+	"sigs.k8s.io/kustomize/pkg/resmap"
+)
+
+// multiTransformer contains a list of transformers.
+type multiTransformer struct {
+	transformers         []Transformer
+	checkConflictEnabled bool
+	rf                   *resource.Factory
+}
+
+var _ Transformer = &multiTransformer{}
+
+// NewMultiTransformer constructs a multiTransformer.
+func NewMultiTransformer(t []Transformer) Transformer {
+	r := &multiTransformer{
+		transformers:         make([]Transformer, len(t)),
+		checkConflictEnabled: false}
+	copy(r.transformers, t)
+	return r
+}
+
+// NewMultiTransformerWithConflictCheck constructs a multiTransformer with checking of conflicts.
+func NewMultiTransformerWithConflictCheck(t []Transformer) Transformer {
+	r := &multiTransformer{
+		transformers:         make([]Transformer, len(t)),
+		checkConflictEnabled: true}
+	copy(r.transformers, t)
+	return r
+}
+
+// Transform applies each transformer in order, optionally checking for conflicts.
+func (o *multiTransformer) Transform(m resmap.ResMap) error {
+	if o.checkConflictEnabled {
+		return o.transformWithCheckConflict(m)
+	}
+	return o.transform(m)
+}
+func (o *multiTransformer) transform(m resmap.ResMap) error {
+	for _, t := range o.transformers {
+		err := t.Transform(m)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Of the len(o.transformers)! possible transformer orderings, compare to a reversed order.
+// A spot check to perform when the transformations are supposed to be commutative.
+// Fail if there's a difference in the result.
+func (o *multiTransformer) transformWithCheckConflict(m resmap.ResMap) error {
+	mcopy := m.DeepCopy(o.rf)
+	err := o.transform(m)
+	if err != nil {
+		return err
+	}
+	o.reverseTransformers()
+	err = o.transform(mcopy)
+	if err != nil {
+		return err
+	}
+	err = m.ErrorIfNotEqual(mcopy)
+	if err != nil {
+		return fmt.Errorf("found conflict between different patches\n%v", err)
+	}
+	return nil
+}
+
+func (o *multiTransformer) reverseTransformers() {
+	for i, j := 0, len(o.transformers)-1; i < j; i, j = i+1, j-1 {
+		o.transformers[i], o.transformers[j] = o.transformers[j], o.transformers[i]
+	}
+}
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/transformers/mutatefield.go b/vendor/sigs.k8s.io/kustomize/pkg/transformers/mutatefield.go
new file mode 100644
index 00000000..eddfeee2
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/transformers/mutatefield.go
@@ -0,0 +1,81 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package transformers
+
+import (
+	"fmt"
+	"log"
+	"strings"
+)
+
+type mutateFunc func(interface{}) (interface{}, error)
+
+func mutateField(
+	m map[string]interface{},
+	pathToField []string,
+	createIfNotPresent bool,
+	fns ...mutateFunc) error {
+	if len(pathToField) == 0 {
+		return nil
+	}
+
+	_, found := m[pathToField[0]]
+	if !found {
+		if !createIfNotPresent {
+			return nil
+		}
+		m[pathToField[0]] = map[string]interface{}{}
+	}
+
+	if len(pathToField) == 1 {
+		var err error
+		for _, fn := range fns {
+			m[pathToField[0]], err = fn(m[pathToField[0]])
+			if err != nil {
+				return err
+			}
+		}
+		return nil
+	}
+
+	v := m[pathToField[0]]
+	newPathToField := pathToField[1:]
+	switch typedV := v.(type) {
+	case nil:
+		log.Printf(
+			"nil value at `%s` ignored in mutation attempt",
+			strings.Join(pathToField, "."))
+		return nil
+	case map[string]interface{}:
+		return mutateField(typedV, newPathToField, createIfNotPresent, fns...)
+	case []interface{}:
+		for i := range typedV {
+			item := typedV[i]
+			typedItem, ok := item.(map[string]interface{})
+			if !ok {
+				return fmt.Errorf("%#v is expected to be %T", item, typedItem)
+			}
+			err := mutateField(typedItem, newPathToField, createIfNotPresent, fns...)
+			if err != nil {
+				return err
+			}
+		}
+		return nil
+	default:
+		return fmt.Errorf("%#v is not expected to be a primitive type", typedV)
+	}
+}
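An in-package sketch (illustrative only, not from the patch) of the create-if-missing walk that mutateField performs over a nested map:

package transformers

import "fmt"

// sketchMutateField adds a label to a minimal object, creating the missing
// intermediate "labels" map on the way down because createIfNotPresent is true.
func sketchMutateField() {
	obj := map[string]interface{}{
		"metadata": map[string]interface{}{"name": "alice"},
	}
	err := mutateField(obj, []string{"metadata", "labels", "app"}, true,
		func(interface{}) (interface{}, error) { return "web", nil })
	if err != nil {
		panic(err)
	}
	fmt.Println(obj["metadata"]) // map[labels:map[app:web] name:alice]
}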
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/transformers/namereference.go b/vendor/sigs.k8s.io/kustomize/pkg/transformers/namereference.go
new file mode 100644
index 00000000..a4e8a7f8
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/transformers/namereference.go
@@ -0,0 +1,144 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package transformers
+
+import (
+	"fmt"
+	"log"
+
+	"sigs.k8s.io/kustomize/pkg/gvk"
+	"sigs.k8s.io/kustomize/pkg/resmap"
+	"sigs.k8s.io/kustomize/pkg/transformers/config"
+)
+
+type nameReferenceTransformer struct {
+	backRefs []config.NameBackReferences
+}
+
+var _ Transformer = &nameReferenceTransformer{}
+
+// NewNameReferenceTransformer constructs a nameReferenceTransformer
+// with a given slice of NameBackReferences.
+func NewNameReferenceTransformer(br []config.NameBackReferences) Transformer {
+	if br == nil {
+		log.Fatal("backrefs not expected to be nil")
+	}
+	return &nameReferenceTransformer{backRefs: br}
+}
+
+// Transform updates name references in resource A that refer to resource B,
+// given that B's name may have changed.
+//
+// For example, a HorizontalPodAutoscaler (HPA) necessarily refers to a
+// Deployment (the thing that the HPA scales). The Deployment name might change
+// (e.g. prefix added), and the reference in the HPA has to be fixed.
+//
+// In the outer loop below, we encounter an HPA.  In scanning backrefs, we
+// find that HPA refers to a Deployment.  So we find all resources in the same
+// namespace as the HPA (and with the same prefix and suffix), and look through
+// them to find all the Deployments with a resId that has a Name matching the
+// field in HPA.  For each match, we overwrite the HPA name field with the value
+// found in the Deployment's name field (the name in the raw object - the
+// modified name - not the unmodified name in the resId).
+//
+// This assumes that the name stored in a ResId (the ResMap key) isn't modified
+// by name transformers.  Name transformers should only modify the name in the
+// body of the resource object (the value in the ResMap).
+func (o *nameReferenceTransformer) Transform(m resmap.ResMap) error {
+	// TODO: Too much looping.
+	// Even more hidden loops in FilterBy,
+	// updateNameReference and FindByGVKN.
+	for id := range m {
+		for _, backRef := range o.backRefs {
+			for _, fSpec := range backRef.FieldSpecs {
+				if id.Gvk().IsSelected(&fSpec.Gvk) {
+					err := mutateField(
+						m[id].Map(), fSpec.PathSlice(),
+						fSpec.CreateIfNotPresent,
+						o.updateNameReference(
+							backRef.Gvk, m.FilterBy(id)))
+					if err != nil {
+						return err
+					}
+				}
+			}
+		}
+	}
+	return nil
+}
+
+func (o *nameReferenceTransformer) updateNameReference(
+	backRef gvk.Gvk, m resmap.ResMap) func(in interface{}) (interface{}, error) {
+	return func(in interface{}) (interface{}, error) {
+		switch in.(type) {
+		case string:
+			s, _ := in.(string)
+			for id, res := range m {
+				if id.Gvk().IsSelected(&backRef) && id.Name() == s {
+					matchedIds := m.GetMatchingIds(id.GvknEquals)
+					// If there's more than one match, there's no way
+					// to know which one to pick, so emit error.
+					if len(matchedIds) > 1 {
+						return nil, fmt.Errorf(
+							"Multiple matches for name %s:\n  %v", id, matchedIds)
+					}
+					// Return transformed name of the object,
+					// complete with prefixes, hashes, etc.
+					return res.GetName(), nil
+				}
+			}
+			return in, nil
+		case []interface{}:
+			l, _ := in.([]interface{})
+			var names []string
+			for _, item := range l {
+				name, ok := item.(string)
+				if !ok {
+					return nil, fmt.Errorf("%#v is expected to be %T", item, name)
+				}
+				names = append(names, name)
+			}
+			for id, res := range m {
+				indexes := indexOf(id.Name(), names)
+				if id.Gvk().IsSelected(&backRef) && len(indexes) > 0 {
+					matchedIds := m.GetMatchingIds(id.GvknEquals)
+					if len(matchedIds) > 1 {
+						return nil, fmt.Errorf(
+							"Multiple matches for name %s:\n %v", id, matchedIds)
+					}
+					for _, index := range indexes {
+						l[index] = res.GetName()
+					}
+					return l, nil
+				}
+			}
+			return in, nil
+		default:
+			return nil, fmt.Errorf("%#v is expected to be either a string or a []interface{}", in)
+		}
+	}
+}
+
+func indexOf(s string, slice []string) []int {
+	var index []int
+	for i, item := range slice {
+		if item == s {
+			index = append(index, i)
+		}
+	}
+	return index
+}
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/transformers/namespace.go b/vendor/sigs.k8s.io/kustomize/pkg/transformers/namespace.go
new file mode 100644
index 00000000..5f0c0648
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/transformers/namespace.go
@@ -0,0 +1,121 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package transformers
+
+import (
+	"sigs.k8s.io/kustomize/pkg/gvk"
+	"sigs.k8s.io/kustomize/pkg/resmap"
+	"sigs.k8s.io/kustomize/pkg/transformers/config"
+)
+
+type namespaceTransformer struct {
+	namespace        string
+	fieldSpecsToUse  []config.FieldSpec
+	fieldSpecsToSkip []config.FieldSpec
+}
+
+var _ Transformer = &namespaceTransformer{}
+
+// NewNamespaceTransformer constructs a namespaceTransformer.
+func NewNamespaceTransformer(ns string, cf []config.FieldSpec) Transformer {
+	if len(ns) == 0 {
+		return NewNoOpTransformer()
+	}
+	var skip []config.FieldSpec
+	for _, g := range gvk.ClusterLevelGvks() {
+		skip = append(skip, config.FieldSpec{Gvk: g})
+	}
+	return &namespaceTransformer{
+		namespace:        ns,
+		fieldSpecsToUse:  cf,
+		fieldSpecsToSkip: skip,
+	}
+}
+
+// Transform adds the namespace.
+func (o *namespaceTransformer) Transform(m resmap.ResMap) error {
+	mf := resmap.ResMap{}
+
+	for id := range m {
+		found := false
+		for _, path := range o.fieldSpecsToSkip {
+			if id.Gvk().IsSelected(&path.Gvk) {
+				found = true
+				break
+			}
+		}
+		if !found {
+			mf[id] = m[id]
+			delete(m, id)
+		}
+	}
+
+	for id := range mf {
+		objMap := mf[id].Map()
+		for _, path := range o.fieldSpecsToUse {
+			if !id.Gvk().IsSelected(&path.Gvk) {
+				continue
+			}
+
+			err := mutateField(objMap, path.PathSlice(), path.CreateIfNotPresent, func(_ interface{}) (interface{}, error) {
+				return o.namespace, nil
+			})
+			if err != nil {
+				return err
+			}
+			newid := id.CopyWithNewNamespace(o.namespace)
+			m[newid] = mf[id]
+		}
+
+	}
+	o.updateClusterRoleBinding(m)
+	return nil
+}
+
+func (o *namespaceTransformer) updateClusterRoleBinding(m resmap.ResMap) {
+	saMap := map[string]bool{}
+	for id := range m {
+		if id.Gvk().Equals(gvk.Gvk{Version: "v1", Kind: "ServiceAccount"}) {
+			saMap[id.Name()] = true
+		}
+	}
+
+	for id := range m {
+		if id.Gvk().Kind != "ClusterRoleBinding" && id.Gvk().Kind != "RoleBinding" {
+			continue
+		}
+		objMap := m[id].Map()
+		subjects := objMap["subjects"].([]interface{})
+		for i := range subjects {
+			subject := subjects[i].(map[string]interface{})
+			kind, foundk := subject["kind"]
+			name, foundn := subject["name"]
+			if !foundk || !foundn || kind.(string) != "ServiceAccount" {
+				continue
+			}
+			// a ServiceAccount named “default” exists in every active namespace
+			if name.(string) == "default" || saMap[name.(string)] {
+				subject := subjects[i].(map[string]interface{})
+				mutateField(subject, []string{"namespace"}, true, func(_ interface{}) (interface{}, error) {
+					return o.namespace, nil
+				})
+				subjects[i] = subject
+			}
+		}
+		objMap["subjects"] = subjects
+	}
+}
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/transformers/nooptransformer.go b/vendor/sigs.k8s.io/kustomize/pkg/transformers/nooptransformer.go
new file mode 100644
index 00000000..c07389b3
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/transformers/nooptransformer.go
@@ -0,0 +1,34 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package transformers
+
+import "sigs.k8s.io/kustomize/pkg/resmap"
+
+// noOpTransformer contains a no-op transformer.
+type noOpTransformer struct{}
+
+var _ Transformer = &noOpTransformer{}
+
+// NewNoOpTransformer constructs a noOpTransformer.
+func NewNoOpTransformer() Transformer {
+	return &noOpTransformer{}
+}
+
+// Transform does nothing.
+func (o *noOpTransformer) Transform(_ resmap.ResMap) error {
+	return nil
+}
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/transformers/prefixsuffixname.go b/vendor/sigs.k8s.io/kustomize/pkg/transformers/prefixsuffixname.go
new file mode 100644
index 00000000..c4ca85f5
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/transformers/prefixsuffixname.go
@@ -0,0 +1,109 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package transformers
+
+import (
+	"errors"
+	"fmt"
+
+	"sigs.k8s.io/kustomize/pkg/gvk"
+	"sigs.k8s.io/kustomize/pkg/resmap"
+	"sigs.k8s.io/kustomize/pkg/transformers/config"
+)
+
+// namePrefixSuffixTransformer contains the prefix, suffix, and the FieldSpecs
+// for each field needing a name prefix and suffix.
+type namePrefixSuffixTransformer struct {
+	prefix           string
+	suffix           string
+	fieldSpecsToUse  []config.FieldSpec
+	fieldSpecsToSkip []config.FieldSpec
+}
+
+var _ Transformer = &namePrefixSuffixTransformer{}
+
+var prefixSuffixFieldSpecsToSkip = []config.FieldSpec{
+	{
+		Gvk: gvk.Gvk{Kind: "CustomResourceDefinition"},
+	},
+}
+
+// NewNamePrefixSuffixTransformer makes a namePrefixSuffixTransformer.
+func NewNamePrefixSuffixTransformer(
+	np, ns string, fieldSpecs []config.FieldSpec) (Transformer, error) {
+	if len(np) == 0 && len(ns) == 0 {
+		return NewNoOpTransformer(), nil
+	}
+	if fieldSpecs == nil {
+		return nil, errors.New("fieldSpecs is not expected to be nil")
+	}
+	return &namePrefixSuffixTransformer{
+		prefix:           np,
+		suffix:           ns,
+		fieldSpecsToUse:  fieldSpecs,
+		fieldSpecsToSkip: prefixSuffixFieldSpecsToSkip}, nil
+}
+
+// Transform prepends the name prefix and appends the name suffix.
+func (o *namePrefixSuffixTransformer) Transform(m resmap.ResMap) error {
+	// Fill map "mf" with entries subject to name modification, and
+	// delete these entries from "m", so that for now m retains only
+	// the entries whose names will not be modified.
+	mf := resmap.ResMap{}
+	for id := range m {
+		found := false
+		for _, path := range o.fieldSpecsToSkip {
+			if id.Gvk().IsSelected(&path.Gvk) {
+				found = true
+				break
+			}
+		}
+		if !found {
+			mf[id] = m[id]
+			delete(m, id)
+		}
+	}
+
+	for id := range mf {
+		objMap := mf[id].Map()
+		for _, path := range o.fieldSpecsToUse {
+			if !id.Gvk().IsSelected(&path.Gvk) {
+				continue
+			}
+			err := mutateField(
+				objMap,
+				path.PathSlice(),
+				path.CreateIfNotPresent,
+				o.addPrefixSuffix)
+			if err != nil {
+				return err
+			}
+			newId := id.CopyWithNewPrefixSuffix(o.prefix, o.suffix)
+			m[newId] = mf[id]
+		}
+	}
+	return nil
+}
+
+func (o *namePrefixSuffixTransformer) addPrefixSuffix(
+	in interface{}) (interface{}, error) {
+	s, ok := in.(string)
+	if !ok {
+		return nil, fmt.Errorf("%#v is expected to be %T", in, s)
+	}
+	return fmt.Sprintf("%s%s%s", o.prefix, s, o.suffix), nil
+}
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/transformers/refvars.go b/vendor/sigs.k8s.io/kustomize/pkg/transformers/refvars.go
new file mode 100644
index 00000000..b31ec6e7
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/transformers/refvars.go
@@ -0,0 +1,94 @@
+package transformers
+
+import (
+	"fmt"
+	"sigs.k8s.io/kustomize/pkg/expansion"
+	"sigs.k8s.io/kustomize/pkg/resmap"
+	"sigs.k8s.io/kustomize/pkg/transformers/config"
+)
+
+type RefVarTransformer struct {
+	varMap            map[string]string
+	replacementCounts map[string]int
+	fieldSpecs        []config.FieldSpec
+	mappingFunc       func(string) string
+}
+
+// NewRefVarTransformer returns a new RefVarTransformer
+// that replaces $(VAR) style variables with values.
+// The fieldSpecs are the places to look for occurrences of $(VAR).
+func NewRefVarTransformer(
+	varMap map[string]string, fs []config.FieldSpec) *RefVarTransformer {
+	return &RefVarTransformer{
+		varMap:     varMap,
+		fieldSpecs: fs,
+	}
+}
+
+// replaceVars accepts as 'in' a string, or string array, which can have
+// embedded instances of $VAR style variables, e.g. a container command string.
+// The function returns the string with the variables expanded to their final
+// values.
+func (rv *RefVarTransformer) replaceVars(in interface{}) (interface{}, error) {
+	switch vt := in.(type) {
+	case []interface{}:
+		var xs []string
+		for _, a := range in.([]interface{}) {
+			xs = append(xs, expansion.Expand(a.(string), rv.mappingFunc))
+		}
+		return xs, nil
+	case map[string]interface{}:
+		inMap := in.(map[string]interface{})
+		xs := make(map[string]interface{}, len(inMap))
+		for k, v := range inMap {
+			s, ok := v.(string)
+			if !ok {
+				return nil, fmt.Errorf("%#v is expected to be %T", v, s)
+			}
+			xs[k] = expansion.Expand(s, rv.mappingFunc)
+		}
+		return xs, nil
+	case interface{}:
+		s, ok := in.(string)
+		if !ok {
+			return nil, fmt.Errorf("%#v is expected to be %T", in, s)
+		}
+		return expansion.Expand(s, rv.mappingFunc), nil
+	case nil:
+		return nil, nil
+	default:
+		return "", fmt.Errorf("invalid type encountered %T", vt)
+	}
+}
+
+// UnusedVars returns a slice of Var names that were unused
+// after a Transform run.
+func (rv *RefVarTransformer) UnusedVars() []string {
+	var unused []string
+	for k := range rv.varMap {
+		_, ok := rv.replacementCounts[k]
+		if !ok {
+			unused = append(unused, k)
+		}
+	}
+	return unused
+}
+
+// Transform replaces $(VAR) style variables with values.
+func (rv *RefVarTransformer) Transform(m resmap.ResMap) error {
+	rv.replacementCounts = make(map[string]int)
+	rv.mappingFunc = expansion.MappingFuncFor(
+		rv.replacementCounts, rv.varMap)
+	for id, res := range m {
+		for _, fieldSpec := range rv.fieldSpecs {
+			if id.Gvk().IsSelected(&fieldSpec.Gvk) {
+				if err := mutateField(
+					res.Map(), fieldSpec.PathSlice(),
+					false, rv.replaceVars); err != nil {
+					return err
+				}
+			}
+		}
+	}
+	return nil
+}
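A small lifecycle sketch (illustrative, not from the patch): Transform must run before UnusedVars reports anything meaningful; the empty ResMap and the var map below are assumptions for the example:

package main

import (
	"fmt"

	"sigs.k8s.io/kustomize/pkg/resmap"
	"sigs.k8s.io/kustomize/pkg/transformers"
	"sigs.k8s.io/kustomize/pkg/transformers/config"
)

func main() {
	rt := transformers.NewRefVarTransformer(
		map[string]string{"FOO": "bar"}, []config.FieldSpec{})
	if err := rt.Transform(resmap.ResMap{}); err != nil {
		panic(err)
	}
	// Nothing in the (empty) resource map referenced $(FOO).
	fmt.Println(rt.UnusedVars()) // [FOO]
}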
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/transformers/transformer.go b/vendor/sigs.k8s.io/kustomize/pkg/transformers/transformer.go
new file mode 100644
index 00000000..dc6f8807
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/transformers/transformer.go
@@ -0,0 +1,26 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package transformers has implementations of resmap.ResMap transformers.
+package transformers
+
+import "sigs.k8s.io/kustomize/pkg/resmap"
+
+// A Transformer modifies an instance of resmap.ResMap.
+type Transformer interface {
+	// Transform modifies data in the argument, e.g. adding labels to resources that can be labelled.
+	Transform(m resmap.ResMap) error
+}
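A hedged sketch (not from the patch) of a caller-defined Transformer composed with the built-in ones via NewMultiTransformer; countTransformer is hypothetical:

package main

import (
	"fmt"

	"sigs.k8s.io/kustomize/pkg/resmap"
	"sigs.k8s.io/kustomize/pkg/transformers"
)

// countTransformer is a hypothetical Transformer that only counts resources.
type countTransformer struct{ n int }

func (t *countTransformer) Transform(m resmap.ResMap) error {
	t.n = len(m)
	return nil
}

var _ transformers.Transformer = &countTransformer{}

func main() {
	ct := &countTransformer{}
	t := transformers.NewMultiTransformer([]transformers.Transformer{
		transformers.NewNoOpTransformer(), ct,
	})
	if err := t.Transform(resmap.ResMap{}); err != nil {
		panic(err)
	}
	fmt.Println(ct.n) // 0, since the map was empty
}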
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/types/genargs.go b/vendor/sigs.k8s.io/kustomize/pkg/types/genargs.go
new file mode 100644
index 00000000..bef093d3
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/types/genargs.go
@@ -0,0 +1,64 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package types
+
+import (
+	"strconv"
+	"strings"
+)
+
+// GenArgs contains both generator args and options
+type GenArgs struct {
+	args *GeneratorArgs
+	opts *GeneratorOptions
+}
+
+// NewGenArgs returns a new GenArgs object.
+func NewGenArgs(args *GeneratorArgs, opts *GeneratorOptions) *GenArgs {
+	return &GenArgs{
+		args: args,
+		opts: opts,
+	}
+}
+
+func (g *GenArgs) String() string {
+	if g == nil {
+		return "{nilGenArgs}"
+	}
+	return "{" +
+		strings.Join([]string{
+			"nsfx:" + strconv.FormatBool(g.NeedsHashSuffix()),
+			"beh:" + g.Behavior().String()},
+			",") +
+		"}"
+}
+
+// NeedsHashSuffix returns true if the hash suffix is needed.
+// It is needed when both of the following conditions are met:
+//  1) GenArgs is not nil
+//  2) DisableNameSuffixHash in GeneratorOptions is not set to true
+func (g *GenArgs) NeedsHashSuffix() bool {
+	return g.args != nil && (g.opts == nil || g.opts.DisableNameSuffixHash == false)
+}
+
+// Behavior returns the Behavior field of GeneratorArgs.
+func (g *GenArgs) Behavior() GenerationBehavior {
+	if g.args == nil {
+		return BehaviorUnspecified
+	}
+	return NewGenerationBehavior(g.args.Behavior)
+}
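A small sketch (illustrative, not from the patch) of how the String form reflects NeedsHashSuffix and Behavior; GeneratorArgs and GeneratorOptions are defined in kustomization.go further below:

package main

import (
	"fmt"

	"sigs.k8s.io/kustomize/pkg/types"
)

func main() {
	ga := types.NewGenArgs(
		&types.GeneratorArgs{Behavior: "merge"},
		&types.GeneratorOptions{DisableNameSuffixHash: true})
	// DisableNameSuffixHash is true, so no hash suffix is wanted.
	fmt.Println(ga) // {nsfx:false,beh:merge}
}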
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/types/generationbehavior.go b/vendor/sigs.k8s.io/kustomize/pkg/types/generationbehavior.go
new file mode 100644
index 00000000..67ba8a0b
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/types/generationbehavior.go
@@ -0,0 +1,59 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package types
+
+// GenerationBehavior specifies generation behavior of configmaps, secrets and maybe other resources.
+type GenerationBehavior int
+
+const (
+	// BehaviorUnspecified is an Unspecified behavior; typically treated as a Create.
+	BehaviorUnspecified GenerationBehavior = iota
+	// BehaviorCreate makes a new resource.
+	BehaviorCreate
+	// BehaviorReplace replaces a resource.
+	BehaviorReplace
+	// BehaviorMerge attempts to merge a new resource with an existing resource.
+	BehaviorMerge
+)
+
+// String converts a GenerationBehavior to a string.
+func (b GenerationBehavior) String() string {
+	switch b {
+	case BehaviorReplace:
+		return "replace"
+	case BehaviorMerge:
+		return "merge"
+	case BehaviorCreate:
+		return "create"
+	default:
+		return "unspecified"
+	}
+}
+
+// NewGenerationBehavior converts a string to a GenerationBehavior.
+func NewGenerationBehavior(s string) GenerationBehavior {
+	switch s {
+	case "replace":
+		return BehaviorReplace
+	case "merge":
+		return BehaviorMerge
+	case "create":
+		return BehaviorCreate
+	default:
+		return BehaviorUnspecified
+	}
+}
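A trivial round-trip sketch (illustrative only): strings parse into the enum and print back, and unknown strings fall back to BehaviorUnspecified:

package main

import (
	"fmt"

	"sigs.k8s.io/kustomize/pkg/types"
)

func main() {
	fmt.Println(types.NewGenerationBehavior("merge")) // merge
	fmt.Println(types.NewGenerationBehavior("bogus")) // unspecified
}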
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/types/kustomization.go b/vendor/sigs.k8s.io/kustomize/pkg/types/kustomization.go
new file mode 100644
index 00000000..12d09820
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/types/kustomization.go
@@ -0,0 +1,250 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package types holds struct definitions that should find a better home.
+package types
+
+import (
+	"regexp"
+
+	"sigs.k8s.io/kustomize/pkg/image"
+	"sigs.k8s.io/kustomize/pkg/patch"
+)
+
+const (
+	KustomizationVersion = "kustomize.config.k8s.io/v1beta1"
+	KustomizationKind    = "Kustomization"
+)
+
+// TypeMeta copies apimachinery/pkg/apis/meta/v1.TypeMeta
+type TypeMeta struct {
+	// Kind copies apimachinery/pkg/apis/meta/v1.TypeMeta.Kind
+	Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"`
+
+	// APIVersion copies apimachinery/pkg/apis/meta/v1.TypeMeta.APIVersion
+	APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt,name=apiVersion"`
+}
+
+// Kustomization holds the information needed to generate customized k8s api resources.
+type Kustomization struct {
+	TypeMeta `json:",inline" yaml:",inline"`
+
+	//
+	// Operators - what kustomize can do.
+	//
+
+	// NamePrefix will prefix the names of all resources mentioned in the kustomization
+	// file including generated configmaps and secrets.
+	NamePrefix string `json:"namePrefix,omitempty" yaml:"namePrefix,omitempty"`
+
+	// NameSuffix will suffix the names of all resources mentioned in the kustomization
+	// file including generated configmaps and secrets.
+	NameSuffix string `json:"nameSuffix,omitempty" yaml:"nameSuffix,omitempty"`
+
+	// Namespace to add to all objects.
+	Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"`
+
+	// CommonLabels to add to all objects and selectors.
+	CommonLabels map[string]string `json:"commonLabels,omitempty" yaml:"commonLabels,omitempty"`
+
+	// CommonAnnotations to add to all objects.
+	CommonAnnotations map[string]string `json:"commonAnnotations,omitempty" yaml:"commonAnnotations,omitempty"`
+
+	// PatchesStrategicMerge specifies the relative path to a file
+	// containing a strategic merge patch.  Format documented at
+	// https://github.com/kubernetes/community/blob/master/contributors/devel/strategic-merge-patch.md
+	// URLs and globs are not supported.
+	PatchesStrategicMerge []patch.StrategicMerge `json:"patchesStrategicMerge,omitempty" yaml:"patchesStrategicMerge,omitempty"`
+
+	// PatchesJson6902 is a list of patches to apply in JSON patch format.
+	// Format documented at https://tools.ietf.org/html/rfc6902
+	// and http://jsonpatch.com
+	PatchesJson6902 []patch.Json6902 `json:"patchesJson6902,omitempty" yaml:"patchesJson6902,omitempty"`
+
+	// Images is a list of (image name, new name, new tag or digest)
+	// for changing image names, tags or digests. This can also be achieved with a
+	// patch, but this operator is simpler to specify.
+	Images []image.Image `json:"images,omitempty" yaml:"images,omitempty"`
+
+	// Vars allow things modified by kustomize to be injected into a
+	// container specification. A var is a name (e.g. FOO) associated
+	// with a field in a specific resource instance.  The field must
+	// contain a value of type string, and defaults to the name field
+	// of the instance.  Any appearance of "$(FOO)" in the container
+	// spec will be replaced at kustomize build time, after the final
+	// value of the specified field has been determined.
+	Vars []Var `json:"vars,omitempty" yaml:"vars,omitempty"`
+
+	//
+	// Operands - what kustomize operates on.
+	//
+
+	// Resources specifies relative paths to files holding YAML representations
+	// of kubernetes API objects. URLs and globs not supported.
+	Resources []string `json:"resources,omitempty" yaml:"resources,omitempty"`
+
+	// Crds specifies relative paths to Custom Resource Definition files.
+	// This allows custom resources to be recognized as operands, making
+	// it possible to add them to the Resources list.
+	// CRDs themselves are not modified.
+	Crds []string `json:"crds,omitempty" yaml:"crds,omitempty"`
+
+	// Bases are relative paths or github repository URLs specifying a
+	// directory containing a kustomization.yaml file.
+	// URL format: https://github.com/hashicorp/go-getter#url-format
+	Bases []string `json:"bases,omitempty" yaml:"bases,omitempty"`
+
+	//
+	// Generators (operators that create operands)
+	//
+
+	// ConfigMapGenerator is a list of configmaps to generate from
+	// local data (one configMap per list item).
+	// The resulting resource is a normal operand, subject to
+	// name prefixing, patching, etc.  By default, the name of
+	// the map will have a suffix hash generated from its contents.
+	ConfigMapGenerator []ConfigMapArgs `json:"configMapGenerator,omitempty" yaml:"configMapGenerator,omitempty"`
+
+	// SecretGenerator is a list of secrets to generate from
+	// local data (one secret per list item).
+	// The resulting resource is a normal operand, subject to
+	// name prefixing, patching, etc.  By default, the name of
+	// the map will have a suffix hash generated from its contents.
+	SecretGenerator []SecretArgs `json:"secretGenerator,omitempty" yaml:"secretGenerator,omitempty"`
+
+	// GeneratorOptions modify behavior of all ConfigMap and Secret generators.
+	GeneratorOptions *GeneratorOptions `json:"generatorOptions,omitempty" yaml:"generatorOptions,omitempty"`
+
+	// Configurations is a list of transformer configuration files
+	Configurations []string `json:"configurations,omitempty" yaml:"configurations,omitempty"`
+}
+
+// DealWithMissingFields fills the missing fields
+func (k *Kustomization) DealWithMissingFields() []string {
+	var msgs []string
+	if k.APIVersion == "" {
+		k.APIVersion = KustomizationVersion
+		msgs = append(msgs, "Fixed the missing field by adding apiVersion: "+KustomizationVersion)
+	}
+	if k.Kind == "" {
+		k.Kind = KustomizationKind
+		msgs = append(msgs, "Fixed the missing field by adding kind: "+KustomizationKind)
+	}
+	return msgs
+}
+
+func (k *Kustomization) EnforceFields() []string {
+	var errs []string
+	if k.APIVersion != "" && k.APIVersion != KustomizationVersion {
+		errs = append(errs, "apiVersion should be "+KustomizationVersion)
+	}
+	if k.Kind != "" && k.Kind != KustomizationKind {
+		errs = append(errs, "kind should be "+KustomizationKind)
+	}
+	return errs
+}
+
+// DealWithDeprecatedFields should be called immediately after
+// loading from storage.
+func DealWithDeprecatedFields(data []byte) []byte {
+	deprecateFieldsMap := map[string]string{
+		"patches:":   "patchesStrategicMerge:",
+		"imageTags:": "images:",
+	}
+	for oldname, newname := range deprecateFieldsMap {
+		pattern := regexp.MustCompile(oldname)
+		data = pattern.ReplaceAll(data, []byte(newname))
+	}
+	return data
+}
+
+// GeneratorArgs contains arguments common to generators.
+type GeneratorArgs struct {
+	// Namespace for the configmap, optional
+	Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"`
+
+	// Name - actually the partial name - of the generated resource.
+	// The full name ends up being something like
+	// NamePrefix + this.Name + hash(content of generated resource).
+	Name string `json:"name,omitempty" yaml:"name,omitempty"`
+
+	// Behavior of generated resource, must be one of:
+	//   'create': create a new one
+	//   'replace': replace the existing one
+	//   'merge': merge with the existing one
+	Behavior string `json:"behavior,omitempty" yaml:"behavior,omitempty"`
+
+	// DataSources for the generator.
+	DataSources `json:",inline,omitempty" yaml:",inline,omitempty"`
+}
+
+// ConfigMapArgs contains the metadata of how to generate a configmap.
+type ConfigMapArgs struct {
+	// GeneratorArgs for the configmap.
+	GeneratorArgs `json:",inline,omitempty" yaml:",inline,omitempty"`
+}
+
+// SecretArgs contains the metadata of how to generate a secret.
+type SecretArgs struct {
+	// GeneratorArgs for the secret.
+	GeneratorArgs `json:",inline,omitempty" yaml:",inline,omitempty"`
+
+	// Type of the secret.
+	//
+	// This is the same field as the secret type field in v1/Secret:
+	// It can be "Opaque" (default), or "kubernetes.io/tls".
+	//
+	// If type is "kubernetes.io/tls", then "literals" or "files" must have exactly two
+	// keys: "tls.key" and "tls.crt"
+	Type string `json:"type,omitempty" yaml:"type,omitempty"`
+}
+
+// DataSources contains some generic sources for configmaps.
+type DataSources struct {
+	// LiteralSources is a list of literal sources.
+	// Each literal source should be a key and literal value,
+	// e.g. `somekey=somevalue`
+	// It will be similar to kubectl create configmap|secret --from-literal
+	LiteralSources []string `json:"literals,omitempty" yaml:"literals,omitempty"`
+
+	// FileSources is a list of file sources.
+	// Each file source can be specified using its file path, in which case file
+	// basename will be used as configmap key, or optionally with a key and file
+	// path, in which case the given key will be used.
+	// Specifying a directory will iterate each named file in the directory
+	// whose basename is a valid configmap key.
+	// It will be similar to kubectl create configmap|secret --from-file
+	FileSources []string `json:"files,omitempty" yaml:"files,omitempty"`
+
+	// EnvSource format should be a path to a file to read lines of key=val
+	// pairs to create a configmap.
+	// e.g. a Docker .env file or a .ini file.
+	EnvSource string `json:"env,omitempty" yaml:"env,omitempty"`
+}
+
+// GeneratorOptions modify behavior of all ConfigMap and Secret generators.
+type GeneratorOptions struct {
+	// Labels to add to all generated resources.
+	Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"`
+
+	// Annotations to add to all generated resources.
+	Annotations map[string]string `json:"annotations,omitempty" yaml:"annotations,omitempty"`
+
+	// DisableNameSuffixHash if true disables the default behavior of adding a
+	// suffix to the names of generated resources that is a hash of the
+	// resource contents.
+	DisableNameSuffixHash bool `json:"disableNameSuffixHash,omitempty" yaml:"disableNameSuffixHash,omitempty"`
+}
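A minimal sketch (illustrative, not from the patch) of DealWithDeprecatedFields rewriting a deprecated key before the kustomization is unmarshalled; the YAML bytes are made up for the example:

package main

import (
	"fmt"

	"sigs.k8s.io/kustomize/pkg/types"
)

func main() {
	raw := []byte("imageTags:\n- name: nginx\n  newTag: \"1.15\"\n")
	// "imageTags:" is rewritten to "images:" (and "patches:" would become
	// "patchesStrategicMerge:").
	fmt.Printf("%s", types.DealWithDeprecatedFields(raw))
}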
diff --git a/vendor/sigs.k8s.io/kustomize/pkg/types/var.go b/vendor/sigs.k8s.io/kustomize/pkg/types/var.go
new file mode 100644
index 00000000..6a48032a
--- /dev/null
+++ b/vendor/sigs.k8s.io/kustomize/pkg/types/var.go
@@ -0,0 +1,145 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package types
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+
+	"sigs.k8s.io/kustomize/pkg/gvk"
+)
+
+const defaultFieldPath = "metadata.name"
+
+// Var represents a variable whose value will be sourced
+// from a field in a Kubernetes object.
+type Var struct {
+	// Value of identifier name e.g. FOO used in container args, annotations
+	// Appears in pod template as $(FOO)
+	Name string `json:"name" yaml:"name"`
+
+	// ObjRef must refer to a Kubernetes resource under the
+	// purview of this kustomization. ObjRef should use the
+	// raw name of the object (the name specified in its YAML,
+	// before addition of a namePrefix and a nameSuffix).
+	ObjRef Target `json:"objref" yaml:"objref"`
+
+	// FieldRef refers to the field of the object referred to by
+	// ObjRef whose value will be extracted for use in
+	// replacing $(FOO).
+	// If unspecified, this defaults to fieldPath: $defaultFieldPath
+	FieldRef FieldSelector `json:"fieldref,omitempty" yaml:"fieldref,omitempty"`
+}
+
+// Target refers to a kubernetes object by Group, Version, Kind and Name
+// gvk.Gvk contains Group, Version and Kind
+// APIVersion is added to keep the backward compatibility of using ObjectReference
+// for Var.ObjRef
+type Target struct {
+	APIVersion string `json:"apiVersion,omitempty" yaml:"apiVersion,omitempty"`
+	gvk.Gvk    `json:",inline,omitempty" yaml:",inline,omitempty"`
+	Name       string `json:"name" yaml:"name"`
+}
+
+// FieldSelector contains the fieldPath to an object field.
+// This struct is added to keep the backward compatibility of using ObjectFieldSelector
+// for Var.FieldRef
+type FieldSelector struct {
+	FieldPath string `json:"fieldPath,omitempty" yaml:"fieldPath,omitempty"`
+}
+
+// defaulting sets the field reference to its default when unspecified.
+func (v *Var) defaulting() {
+	if v.FieldRef.FieldPath == "" {
+		v.FieldRef.FieldPath = defaultFieldPath
+	}
+}
+
+// VarSet is a slice of Vars where no var.Name is repeated.
+type VarSet struct {
+	set []Var
+}
+
+// Set returns a copy of the var set.
+func (vs *VarSet) Set() []Var {
+	s := make([]Var, len(vs.set))
+	copy(s, vs.set)
+	return s
+}
+
+// MergeSet absorbs other vars with error on name collision.
+func (vs *VarSet) MergeSet(incoming *VarSet) error {
+	return vs.MergeSlice(incoming.set)
+}
+
+// MergeSlice absorbs other vars with error on name collision.
+// Empty fields in incoming vars are defaulted.
+func (vs *VarSet) MergeSlice(incoming []Var) error {
+	for _, v := range incoming {
+		if vs.Contains(v) {
+			return fmt.Errorf(
+				"var %s already encountered", v.Name)
+		}
+		v.defaulting()
+		vs.insert(v)
+	}
+	return nil
+}
+
+func (vs *VarSet) insert(v Var) {
+	index := sort.Search(
+		len(vs.set),
+		func(i int) bool { return vs.set[i].Name > v.Name })
+	// make room
+	vs.set = append(vs.set, Var{})
+	// shift right at index.
+	// copy will not increase size of destination.
+	copy(vs.set[index+1:], vs.set[index:])
+	vs.set[index] = v
+}
+
+// Contains is true if the set has the other var.
+func (vs *VarSet) Contains(other Var) bool {
+	return vs.Get(other.Name) != nil
+}
+
+// Get returns the var with the given name, else nil.
+func (vs *VarSet) Get(name string) *Var {
+	for _, v := range vs.set {
+		if v.Name == name {
+			return &v
+		}
+	}
+	return nil
+}
+
+// GVK returns the Gvk object in Target
+func (t *Target) GVK() gvk.Gvk {
+	if t.APIVersion == "" {
+		return t.Gvk
+	}
+	versions := strings.Split(t.APIVersion, "/")
+	if len(versions) == 2 {
+		t.Group = versions[0]
+		t.Version = versions[1]
+	}
+	if len(versions) == 1 {
+		t.Version = versions[0]
+	}
+	return t.Gvk
+}
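A hedged sketch (not from the patch) of VarSet defaulting and duplicate detection, and of Target.GVK folding a legacy apiVersion into the embedded Gvk; all names are illustrative:

package main

import (
	"fmt"

	"sigs.k8s.io/kustomize/pkg/gvk"
	"sigs.k8s.io/kustomize/pkg/types"
)

func main() {
	vs := &types.VarSet{}
	v := types.Var{
		Name:   "SERVICE_NAME",
		ObjRef: types.Target{Gvk: gvk.Gvk{Version: "v1", Kind: "Service"}, Name: "my-svc"},
	}
	if err := vs.MergeSlice([]types.Var{v}); err != nil {
		panic(err)
	}
	// The empty FieldRef was defaulted.
	fmt.Println(vs.Get("SERVICE_NAME").FieldRef.FieldPath) // metadata.name

	// Merging the same name again is an error.
	fmt.Println(vs.MergeSlice([]types.Var{v})) // var SERVICE_NAME already encountered

	// APIVersion "apps/v1" is split into Group and Version.
	tgt := types.Target{APIVersion: "apps/v1", Gvk: gvk.Gvk{Kind: "Deployment"}, Name: "d"}
	g := tgt.GVK()
	fmt.Println(g.Group, g.Version, g.Kind) // apps v1 Deployment
}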
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/LICENSE b/vendor/sigs.k8s.io/structured-merge-diff/v3/LICENSE
new file mode 100644
index 00000000..8dada3ed
--- /dev/null
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/allocator.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/allocator.go
new file mode 100644
index 00000000..f70cd416
--- /dev/null
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/allocator.go
@@ -0,0 +1,203 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package value
+
+// Allocator provides a value object allocation strategy.
+// Value objects can be allocated by passing an allocator to the "Using"
+// receiver functions on the value interfaces, e.g. Map.ZipUsing(allocator, ...).
+// Value objects returned from "Using" functions should be given back to the allocator
+// once no longer needed by calling Allocator.Free(Value).
+type Allocator interface {
+	// Free gives the allocator back any value objects returned by the "Using"
+	// receiver functions on the value interfaces.
+	// interface{} may be any of: Value, Map, List or Range.
+	Free(interface{})
+
+	// The unexported functions are for "Using" receiver functions of the value types
+	// to request what they need from the allocator.
+	allocValueUnstructured() *valueUnstructured
+	allocListUnstructuredRange() *listUnstructuredRange
+	allocValueReflect() *valueReflect
+	allocMapReflect() *mapReflect
+	allocStructReflect() *structReflect
+	allocListReflect() *listReflect
+	allocListReflectRange() *listReflectRange
+}
+
+// HeapAllocator simply allocates objects to the heap. It is the default
+// allocator used by receiver functions on the value interfaces that do not
+// accept an allocator, and should be used whenever allocating objects that
+// will not be given back to an allocator by calling Allocator.Free(Value).
+var HeapAllocator = &heapAllocator{}
+
+type heapAllocator struct{}
+
+func (p *heapAllocator) allocValueUnstructured() *valueUnstructured {
+	return &valueUnstructured{}
+}
+
+func (p *heapAllocator) allocListUnstructuredRange() *listUnstructuredRange {
+	return &listUnstructuredRange{vv: &valueUnstructured{}}
+}
+
+func (p *heapAllocator) allocValueReflect() *valueReflect {
+	return &valueReflect{}
+}
+
+func (p *heapAllocator) allocStructReflect() *structReflect {
+	return &structReflect{}
+}
+
+func (p *heapAllocator) allocMapReflect() *mapReflect {
+	return &mapReflect{}
+}
+
+func (p *heapAllocator) allocListReflect() *listReflect {
+	return &listReflect{}
+}
+
+func (p *heapAllocator) allocListReflectRange() *listReflectRange {
+	return &listReflectRange{vr: &valueReflect{}}
+}
+
+func (p *heapAllocator) Free(_ interface{}) {}
+
+// NewFreelistAllocator creates a freelist-based allocator.
+// This allocator provides fast allocation and freeing of short-lived value objects.
+//
+// The freelists are bounded in size by freelistMaxSize. If more value objects than this are
+// allocated at once, the excess will be returned to the heap for garbage collection when freed.
+//
+// This allocator is not safe for concurrent use by multiple goroutines.
+//
+// This allocator works well for traversal of value data trees. Typical usage is to acquire
+// a freelist at the beginning of the traversal and use it throughout
+// for all temporary value access.
+func NewFreelistAllocator() Allocator {
+	return &freelistAllocator{
+		valueUnstructured: &freelist{new: func() interface{} {
+			return &valueUnstructured{}
+		}},
+		listUnstructuredRange: &freelist{new: func() interface{} {
+			return &listUnstructuredRange{vv: &valueUnstructured{}}
+		}},
+		valueReflect: &freelist{new: func() interface{} {
+			return &valueReflect{}
+		}},
+		mapReflect: &freelist{new: func() interface{} {
+			return &mapReflect{}
+		}},
+		structReflect: &freelist{new: func() interface{} {
+			return &structReflect{}
+		}},
+		listReflect: &freelist{new: func() interface{} {
+			return &listReflect{}
+		}},
+		listReflectRange: &freelist{new: func() interface{} {
+			return &listReflectRange{vr: &valueReflect{}}
+		}},
+	}
+}
+
+// Bound memory usage of freelists. This prevents the processing of very large lists from leaking memory.
+// This limit is large enough for endpoints objects containing 1000 IP address entries. Freed objects
+// that don't fit into the freelist are orphaned on the heap to be garbage collected.
+const freelistMaxSize = 1000
+
+type freelistAllocator struct {
+	valueUnstructured     *freelist
+	listUnstructuredRange *freelist
+	valueReflect          *freelist
+	mapReflect            *freelist
+	structReflect         *freelist
+	listReflect           *freelist
+	listReflectRange      *freelist
+}
+
+type freelist struct {
+	list []interface{}
+	new  func() interface{}
+}
+
+func (f *freelist) allocate() interface{} {
+	var w2 interface{}
+	if n := len(f.list); n > 0 {
+		w2, f.list = f.list[n-1], f.list[:n-1]
+	} else {
+		w2 = f.new()
+	}
+	return w2
+}
+
+func (f *freelist) free(v interface{}) {
+	if len(f.list) < freelistMaxSize {
+		f.list = append(f.list, v)
+	}
+}
+
+func (w *freelistAllocator) Free(value interface{}) {
+	switch v := value.(type) {
+	case *valueUnstructured:
+		v.Value = nil // don't hold references to unstructured objects
+		w.valueUnstructured.free(v)
+	case *listUnstructuredRange:
+		v.vv.Value = nil // don't hold references to unstructured objects
+		w.listUnstructuredRange.free(v)
+	case *valueReflect:
+		v.ParentMapKey = nil
+		v.ParentMap = nil
+		w.valueReflect.free(v)
+	case *mapReflect:
+		w.mapReflect.free(v)
+	case *structReflect:
+		w.structReflect.free(v)
+	case *listReflect:
+		w.listReflect.free(v)
+	case *listReflectRange:
+		v.vr.ParentMapKey = nil
+		v.vr.ParentMap = nil
+		w.listReflectRange.free(v)
+	}
+}
+
+func (w *freelistAllocator) allocValueUnstructured() *valueUnstructured {
+	return w.valueUnstructured.allocate().(*valueUnstructured)
+}
+
+func (w *freelistAllocator) allocListUnstructuredRange() *listUnstructuredRange {
+	return w.listUnstructuredRange.allocate().(*listUnstructuredRange)
+}
+
+func (w *freelistAllocator) allocValueReflect() *valueReflect {
+	return w.valueReflect.allocate().(*valueReflect)
+}
+
+func (w *freelistAllocator) allocStructReflect() *structReflect {
+	return w.structReflect.allocate().(*structReflect)
+}
+
+func (w *freelistAllocator) allocMapReflect() *mapReflect {
+	return w.mapReflect.allocate().(*mapReflect)
+}
+
+func (w *freelistAllocator) allocListReflect() *listReflect {
+	return w.listReflect.allocate().(*listReflect)
+}
+
+func (w *freelistAllocator) allocListReflectRange() *listReflectRange {
+	return w.listReflectRange.allocate().(*listReflectRange)
+}
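
Illustrative sketch (not part of the vendored sources): reusing a freelist
allocator for a short-lived lookup. It uses only NewFreelistAllocator, Map.GetUsing
and Allocator.Free from this package; the key name is arbitrary.

package allocatorsketch

import "sigs.k8s.io/structured-merge-diff/v3/value"

// hasReplicas reports whether m has a "replicas" key, using a freelist
// allocator for the temporary Value and returning it via Free when done.
func hasReplicas(m value.Map) bool {
	a := value.NewFreelistAllocator()
	v, ok := m.GetUsing(a, "replicas")
	if ok {
		a.Free(v)
	}
	return ok
}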
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/doc.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/doc.go
new file mode 100644
index 00000000..84d7f0f3
--- /dev/null
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/doc.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package value defines types for an in-memory representation of yaml or json
+// objects, organized for convenient comparison with a schema (as defined by
+// the sibling schema package). Functions for reading and writing the objects
+// are also provided.
+package value
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/fields.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/fields.go
new file mode 100644
index 00000000..be3c6724
--- /dev/null
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/fields.go
@@ -0,0 +1,97 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package value
+
+import (
+	"sort"
+	"strings"
+)
+
+// Field is an individual key-value pair.
+type Field struct {
+	Name  string
+	Value Value
+}
+
+// FieldList is a list of key-value pairs. Each field is expected to
+// have a different name.
+type FieldList []Field
+
+// Sort sorts the field list by Name.
+func (f FieldList) Sort() {
+	if len(f) < 2 {
+		return
+	}
+	if len(f) == 2 {
+		if f[1].Name < f[0].Name {
+			f[0], f[1] = f[1], f[0]
+		}
+		return
+	}
+	sort.SliceStable(f, func(i, j int) bool {
+		return f[i].Name < f[j].Name
+	})
+}
+
+// Less returns true if f compares lexically less than rhs.
+func (f FieldList) Less(rhs FieldList) bool {
+	return f.Compare(rhs) == -1
+}
+
+// Compare compares two lists lexically. The result will be 0 if f==rhs, -1
+// if f < rhs, and +1 if f > rhs.
+func (f FieldList) Compare(rhs FieldList) int {
+	i := 0
+	for {
+		if i >= len(f) && i >= len(rhs) {
+			// Maps are the same length and all items are equal.
+			return 0
+		}
+		if i >= len(f) {
+			// F is shorter.
+			return -1
+		}
+		if i >= len(rhs) {
+			// RHS is shorter.
+			return 1
+		}
+		if c := strings.Compare(f[i].Name, rhs[i].Name); c != 0 {
+			return c
+		}
+		if c := Compare(f[i].Value, rhs[i].Value); c != 0 {
+			return c
+		}
+		// The items are equal; continue.
+		i++
+	}
+}
+
+// Equals returns true if the two field lists are equal, false otherwise.
+func (f FieldList) Equals(rhs FieldList) bool {
+	if len(f) != len(rhs) {
+		return false
+	}
+	for i := range f {
+		if f[i].Name != rhs[i].Name {
+			return false
+		}
+		if !Equals(f[i].Value, rhs[i].Value) {
+			return false
+		}
+	}
+	return true
+}
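
Illustrative sketch (not part of the vendored sources): FieldList ordering and
comparison, using NewValueInterface from this package to wrap plain values and
assuming the package compares int64 scalars numerically.

package fieldssketch

import "sigs.k8s.io/structured-merge-diff/v3/value"

// sortedPair builds a two-entry FieldList, sorts it by Name, and reports
// whether it compares lexically after the single-entry list {"a": 1}.
func sortedPair() (value.FieldList, bool) {
	fl := value.FieldList{
		{Name: "b", Value: value.NewValueInterface(int64(2))},
		{Name: "a", Value: value.NewValueInterface(int64(1))},
	}
	fl.Sort() // orders entries by Name: "a", then "b"
	other := value.FieldList{{Name: "a", Value: value.NewValueInterface(int64(1))}}
	// Compare walks entries pairwise; a shorter list sorts first when it is a prefix.
	return fl, fl.Compare(other) == 1
}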
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/jsontagutil.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/jsontagutil.go
new file mode 100644
index 00000000..d4adb8fc
--- /dev/null
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/jsontagutil.go
@@ -0,0 +1,91 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package value
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+)
+
+// TODO: This implements the same functionality as https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/runtime/converter.go#L236
+// but is based on the highly efficient approach from https://golang.org/src/encoding/json/encode.go
+
+func lookupJsonTags(f reflect.StructField) (name string, omit bool, inline bool, omitempty bool) {
+	tag := f.Tag.Get("json")
+	if tag == "-" {
+		return "", true, false, false
+	}
+	name, opts := parseTag(tag)
+	if name == "" {
+		name = f.Name
+	}
+	return name, false, opts.Contains("inline"), opts.Contains("omitempty")
+}
+
+func isZero(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+		return v.Len() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	case reflect.Chan, reflect.Func:
+		panic(fmt.Sprintf("unsupported type: %v", v.Type()))
+	}
+	return false
+}
+
+type tagOptions string
+
+// parseTag splits a struct field's json tag into its name and
+// comma-separated options.
+func parseTag(tag string) (string, tagOptions) {
+	if idx := strings.Index(tag, ","); idx != -1 {
+		return tag[:idx], tagOptions(tag[idx+1:])
+	}
+	return tag, tagOptions("")
+}
+
+// Contains reports whether a comma-separated list of options
+// contains the given optionName. optionName must be surrounded by a
+// string boundary or commas.
+func (o tagOptions) Contains(optionName string) bool {
+	if len(o) == 0 {
+		return false
+	}
+	s := string(o)
+	for s != "" {
+		var next string
+		i := strings.Index(s, ",")
+		if i >= 0 {
+			s, next = s[:i], s[i+1:]
+		}
+		if s == optionName {
+			return true
+		}
+		s = next
+	}
+	return false
+}
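
Illustrative sketch (not part of the vendored sources): how parseTag and
tagOptions.Contains split a json struct tag. Both are unexported, so the sketch
is written as a test that could sit alongside jsontagutil.go in package value.

package value

import "testing"

func TestParseTagSketch(t *testing.T) {
	name, opts := parseTag("metadata,omitempty,inline")
	if name != "metadata" || !opts.Contains("omitempty") || !opts.Contains("inline") {
		t.Fatalf("unexpected parse: %q %q", name, opts)
	}
	if _, plain := parseTag("spec"); plain.Contains("omitempty") {
		t.Fatal("tag without options should have no flags")
	}
}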
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/list.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/list.go
new file mode 100644
index 00000000..0748f18e
--- /dev/null
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/list.go
@@ -0,0 +1,139 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package value
+
+// List represents a list object.
+type List interface {
+	// Length returns how many items can be found in the list.
+	Length() int
+	// At returns the item at the given position in the list. It will
+	// panic if the index is out of range.
+	At(int) Value
+	// AtUsing uses the provided allocator and returns the item at the given
+	// position in the list. It will panic if the index is out of range.
+	// The returned Value should be given back to the Allocator when no longer needed
+	// by calling Allocator.Free(Value).
+	AtUsing(Allocator, int) Value
+	// Range returns a ListRange for iterating over the items in the list.
+	Range() ListRange
+	// RangeUsing uses the provided allocator and returns a ListRange for
+	// iterating over the items in the list.
+	// The returned Range should be given back to the Allocator when no longer needed
+	// by calling Allocator.Free(Value).
+	RangeUsing(Allocator) ListRange
+	// Equals compares the two lists, and returns true if they are the same, false otherwise.
+	// Implementations can use ListEquals as a general implementation for this method.
+	Equals(List) bool
+	// EqualsUsing uses the provided allocator and compares the two lists, and returns true if
+	// they are the same, false otherwise. Implementations can use ListEqualsUsing as a general
+	// implementation for this method.
+	EqualsUsing(Allocator, List) bool
+}
+
+// ListRange represents a single iteration across the items of a list.
+type ListRange interface {
+	// Next increments to the next item in the range, if there is one, and returns true, or returns false if there are no more items.
+	Next() bool
+	// Item returns the index and value of the current item in the range, or panics if there is no current item.
+	// For efficiency, Item may reuse the values returned by previous Item calls. Callers should be careful to avoid holding
+	// pointers to the value returned by Item() that escape the iteration loop since they become invalid once either
+	// Item() or Allocator.Free() is called.
+	Item() (index int, value Value)
+}
+
+// EmptyRange is a ListRange with no items.
+var EmptyRange = &emptyRange{}
+
+type emptyRange struct{}
+
+func (_ *emptyRange) Next() bool {
+	return false
+}
+
+func (_ *emptyRange) Item() (index int, value Value) {
+	panic("Item called on empty ListRange")
+}
+
+// ListEquals compares two lists lexically.
+// WARN: This is a naive implementation, calling lhs.Equals(rhs) is typically the most efficient.
+func ListEquals(lhs, rhs List) bool {
+	return ListEqualsUsing(HeapAllocator, lhs, rhs)
+}
+
+// ListEqualsUsing uses the provided allocator and compares two lists lexically.
+// WARN: This is a naive implementation, calling lhs.EqualsUsing(allocator, rhs) is typically the most efficient.
+func ListEqualsUsing(a Allocator, lhs, rhs List) bool {
+	if lhs.Length() != rhs.Length() {
+		return false
+	}
+
+	lhsRange := lhs.RangeUsing(a)
+	defer a.Free(lhsRange)
+	rhsRange := rhs.RangeUsing(a)
+	defer a.Free(rhsRange)
+
+	for lhsRange.Next() && rhsRange.Next() {
+		_, lv := lhsRange.Item()
+		_, rv := rhsRange.Item()
+		if !EqualsUsing(a, lv, rv) {
+			return false
+		}
+	}
+	return true
+}
+
+// ListLess returns true if lhs compares lexically less than rhs.
+func ListLess(lhs, rhs List) bool {
+	return ListCompare(lhs, rhs) == -1
+}
+
+// ListCompare compares two lists lexically. The result will be 0 if l==rhs, -1
+// if l < rhs, and +1 if l > rhs.
+func ListCompare(lhs, rhs List) int {
+	return ListCompareUsing(HeapAllocator, lhs, rhs)
+}
+
+// ListCompareUsing uses the provided allocator and compares two lists lexically. The result will be 0 if l==rhs, -1
+// if l < rhs, and +1 if l > rhs.
+func ListCompareUsing(a Allocator, lhs, rhs List) int {
+	lhsRange := lhs.RangeUsing(a)
+	defer a.Free(lhsRange)
+	rhsRange := rhs.RangeUsing(a)
+	defer a.Free(rhsRange)
+
+	for {
+		lhsOk := lhsRange.Next()
+		rhsOk := rhsRange.Next()
+		if !lhsOk && !rhsOk {
+			// Lists are the same length and all items are equal.
+			return 0
+		}
+		if !lhsOk {
+			// LHS is shorter.
+			return -1
+		}
+		if !rhsOk {
+			// RHS is shorter.
+			return 1
+		}
+		_, lv := lhsRange.Item()
+		_, rv := rhsRange.Item()
+		if c := CompareUsing(a, lv, rv); c != 0 {
+			return c
+		}
+		// The items are equal; continue.
+	}
+}
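
Illustrative sketch (not part of the vendored sources): the lexical ordering
implemented by ListCompare, exercised through the unexported listUnstructured
type from this package, so it is written as an in-package test. It assumes the
package's scalar comparison orders int64 values numerically.

package value

import "testing"

func TestListCompareSketch(t *testing.T) {
	a := listUnstructured{int64(1), int64(2)}
	b := listUnstructured{int64(1), int64(2), int64(3)}
	if ListCompare(a, b) != -1 { // a is a strict prefix of b, so a < b
		t.Fatal("expected a < b")
	}
	c := listUnstructured{int64(2)}
	if ListCompare(a, c) != -1 { // the first differing item decides: 1 < 2
		t.Fatal("expected a < c")
	}
}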
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/listreflect.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/listreflect.go
new file mode 100644
index 00000000..197d4c92
--- /dev/null
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/listreflect.go
@@ -0,0 +1,98 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package value
+
+import (
+	"reflect"
+)
+
+type listReflect struct {
+	Value reflect.Value
+}
+
+func (r listReflect) Length() int {
+	val := r.Value
+	return val.Len()
+}
+
+func (r listReflect) At(i int) Value {
+	val := r.Value
+	return mustWrapValueReflect(val.Index(i), nil, nil)
+}
+
+func (r listReflect) AtUsing(a Allocator, i int) Value {
+	val := r.Value
+	return a.allocValueReflect().mustReuse(val.Index(i), nil, nil, nil)
+}
+
+func (r listReflect) Unstructured() interface{} {
+	l := r.Length()
+	result := make([]interface{}, l)
+	for i := 0; i < l; i++ {
+		result[i] = r.At(i).Unstructured()
+	}
+	return result
+}
+
+func (r listReflect) Range() ListRange {
+	return r.RangeUsing(HeapAllocator)
+}
+
+func (r listReflect) RangeUsing(a Allocator) ListRange {
+	length := r.Value.Len()
+	if length == 0 {
+		return EmptyRange
+	}
+	rr := a.allocListReflectRange()
+	rr.list = r.Value
+	rr.i = -1
+	rr.entry = TypeReflectEntryOf(r.Value.Type().Elem())
+	return rr
+}
+
+func (r listReflect) Equals(other List) bool {
+	return r.EqualsUsing(HeapAllocator, other)
+}
+func (r listReflect) EqualsUsing(a Allocator, other List) bool {
+	if otherReflectList, ok := other.(*listReflect); ok {
+		return reflect.DeepEqual(r.Value.Interface(), otherReflectList.Value.Interface())
+	}
+	return ListEqualsUsing(a, &r, other)
+}
+
+type listReflectRange struct {
+	list  reflect.Value
+	vr    *valueReflect
+	i     int
+	entry *TypeReflectCacheEntry
+}
+
+func (r *listReflectRange) Next() bool {
+	r.i += 1
+	return r.i < r.list.Len()
+}
+
+func (r *listReflectRange) Item() (index int, value Value) {
+	if r.i < 0 {
+		panic("Item() called before first calling Next()")
+	}
+	if r.i >= r.list.Len() {
+		panic("Item() called on ListRange with no more items")
+	}
+	v := r.list.Index(r.i)
+	return r.i, r.vr.mustReuse(v, r.entry, nil, nil)
+}
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/listunstructured.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/listunstructured.go
new file mode 100644
index 00000000..64cd8e7c
--- /dev/null
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/listunstructured.go
@@ -0,0 +1,74 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package value
+
+type listUnstructured []interface{}
+
+func (l listUnstructured) Length() int {
+	return len(l)
+}
+
+func (l listUnstructured) At(i int) Value {
+	return NewValueInterface(l[i])
+}
+
+func (l listUnstructured) AtUsing(a Allocator, i int) Value {
+	return a.allocValueUnstructured().reuse(l[i])
+}
+
+func (l listUnstructured) Equals(other List) bool {
+	return l.EqualsUsing(HeapAllocator, other)
+}
+
+func (l listUnstructured) EqualsUsing(a Allocator, other List) bool {
+	return ListEqualsUsing(a, &l, other)
+}
+
+func (l listUnstructured) Range() ListRange {
+	return l.RangeUsing(HeapAllocator)
+}
+
+func (l listUnstructured) RangeUsing(a Allocator) ListRange {
+	if len(l) == 0 {
+		return EmptyRange
+	}
+	r := a.allocListUnstructuredRange()
+	r.list = l
+	r.i = -1
+	return r
+}
+
+type listUnstructuredRange struct {
+	list listUnstructured
+	vv   *valueUnstructured
+	i    int
+}
+
+func (r *listUnstructuredRange) Next() bool {
+	r.i += 1
+	return r.i < len(r.list)
+}
+
+func (r *listUnstructuredRange) Item() (index int, value Value) {
+	if r.i < 0 {
+		panic("Item() called before first calling Next()")
+	}
+	if r.i >= len(r.list) {
+		panic("Item() called on ListRange with no more items")
+	}
+	return r.i, r.vv.reuse(r.list[r.i])
+}
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/map.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/map.go
new file mode 100644
index 00000000..168b9fa0
--- /dev/null
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/map.go
@@ -0,0 +1,270 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package value
+
+import (
+	"sort"
+)
+
+// Map represents a map or Go structure.
+type Map interface {
+	// Set changes or sets the value of the given key.
+	Set(key string, val Value)
+	// Get returns the value for the given key, if present, or (nil, false) otherwise.
+	Get(key string) (Value, bool)
+	// GetUsing uses the provided allocator and returns the value for the given key,
+	// if present, or (nil, false) otherwise.
+	// The returned Value should be given back to the Allocator when no longer needed
+	// by calling Allocator.Free(Value).
+	GetUsing(a Allocator, key string) (Value, bool)
+	// Has returns true if the key is present, or false otherwise.
+	Has(key string) bool
+	// Delete removes the key from the map.
+	Delete(key string)
+	// Equals compares the two maps, and returns true if they are the same, false otherwise.
+	// Implementations can use MapEquals as a general implementation for this method.
+	Equals(other Map) bool
+	// EqualsUsing uses the provided allocator and compares the two maps, and returns true if
+	// they are the same, false otherwise. Implementations can use MapEqualsUsing as a general
+	// implementation for this method.
+	EqualsUsing(a Allocator, other Map) bool
+	// Iterate runs the given function for each key/value in the
+	// map. Returning false in the closure prematurely stops the
+	// iteration.
+	Iterate(func(key string, value Value) bool) bool
+	// IterateUsing uses the provided allocator and runs the given function for each key/value
+	// in the map. Returning false in the closure prematurely stops the iteration.
+	IterateUsing(Allocator, func(key string, value Value) bool) bool
+	// Length returns the number of items in the map.
+	Length() int
+	// Empty returns true if the map is empty.
+	Empty() bool
+	// Zip iterates over the entries of two maps together. If both maps contain a value for a given key, fn is called
+	// with the values from both maps, otherwise it is called with the value of the map that contains the key and nil
+	// for the map that does not contain the key. Returning false in the closure prematurely stops the iteration.
+	Zip(other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool
+	// ZipUsing uses the provided allocator and iterates over the entries of two maps together. If both maps
+	// contain a value for a given key, fn is called with the values from both maps, otherwise it is called with
+	// the value of the map that contains the key and nil for the map that does not contain the key. Returning
+	// false in the closure prematurely stops the iteration.
+	ZipUsing(a Allocator, other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool
+}
+
+// MapTraverseOrder defines the map traversal ordering available.
+type MapTraverseOrder int
+
+const (
+	// Unordered indicates that the map traversal has no ordering requirement.
+	Unordered = iota
+	// LexicalKeyOrder indicates that the map traversal is ordered by key, lexically.
+	LexicalKeyOrder
+)
+
+// MapZip iterates over the entries of two maps together. If both maps contain a value for a given key, fn is called
+// with the values from both maps, otherwise it is called with the value of the map that contains the key and nil
+// for the other map. Returning false in the closure prematurely stops the iteration.
+func MapZip(lhs, rhs Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool {
+	return MapZipUsing(HeapAllocator, lhs, rhs, order, fn)
+}
+
+// MapZipUsing uses the provided allocator and iterates over the entries of two maps together. If both maps
+// contain a value for a given key, fn is called with the values from both maps, otherwise it is called with
+// the value of the map that contains the key and nil for the other map. Returning false in the closure
+// prematurely stops the iteration.
+func MapZipUsing(a Allocator, lhs, rhs Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool {
+	if lhs != nil {
+		return lhs.ZipUsing(a, rhs, order, fn)
+	}
+	if rhs != nil {
+		return rhs.ZipUsing(a, lhs, order, func(key string, rhs, lhs Value) bool { // arg positions of lhs and rhs deliberately swapped
+			return fn(key, lhs, rhs)
+		})
+	}
+	return true
+}
+
+// defaultMapZip provides a default implementation of Zip for implementations that do not need to provide
+// their own optimized implementation.
+func defaultMapZip(a Allocator, lhs, rhs Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool {
+	switch order {
+	case Unordered:
+		return unorderedMapZip(a, lhs, rhs, fn)
+	case LexicalKeyOrder:
+		return lexicalKeyOrderedMapZip(a, lhs, rhs, fn)
+	default:
+		panic("Unsupported map order")
+	}
+}
+
+func unorderedMapZip(a Allocator, lhs, rhs Map, fn func(key string, lhs, rhs Value) bool) bool {
+	if (lhs == nil || lhs.Empty()) && (rhs == nil || rhs.Empty()) {
+		return true
+	}
+
+	if lhs != nil {
+		ok := lhs.IterateUsing(a, func(key string, lhsValue Value) bool {
+			var rhsValue Value
+			if rhs != nil {
+				if item, ok := rhs.GetUsing(a, key); ok {
+					rhsValue = item
+					defer a.Free(rhsValue)
+				}
+			}
+			return fn(key, lhsValue, rhsValue)
+		})
+		if !ok {
+			return false
+		}
+	}
+	if rhs != nil {
+		return rhs.IterateUsing(a, func(key string, rhsValue Value) bool {
+			if lhs == nil || !lhs.Has(key) {
+				return fn(key, nil, rhsValue)
+			}
+			return true
+		})
+	}
+	return true
+}
+
+func lexicalKeyOrderedMapZip(a Allocator, lhs, rhs Map, fn func(key string, lhs, rhs Value) bool) bool {
+	var lhsLength, rhsLength int
+	var orderedLength int // rough estimate of length of union of map keys
+	if lhs != nil {
+		lhsLength = lhs.Length()
+		orderedLength = lhsLength
+	}
+	if rhs != nil {
+		rhsLength = rhs.Length()
+		if rhsLength > orderedLength {
+			orderedLength = rhsLength
+		}
+	}
+	if lhsLength == 0 && rhsLength == 0 {
+		return true
+	}
+
+	ordered := make([]string, 0, orderedLength)
+	if lhs != nil {
+		lhs.IterateUsing(a, func(key string, _ Value) bool {
+			ordered = append(ordered, key)
+			return true
+		})
+	}
+	if rhs != nil {
+		rhs.IterateUsing(a, func(key string, _ Value) bool {
+			if lhs == nil || !lhs.Has(key) {
+				ordered = append(ordered, key)
+			}
+			return true
+		})
+	}
+	sort.Strings(ordered)
+	for _, key := range ordered {
+		var litem, ritem Value
+		if lhs != nil {
+			litem, _ = lhs.GetUsing(a, key)
+		}
+		if rhs != nil {
+			ritem, _ = rhs.GetUsing(a, key)
+		}
+		ok := fn(key, litem, ritem)
+		if litem != nil {
+			a.Free(litem)
+		}
+		if ritem != nil {
+			a.Free(ritem)
+		}
+		if !ok {
+			return false
+		}
+	}
+	return true
+}
+
+// MapLess returns true if lhs compares lexically less than rhs.
+func MapLess(lhs, rhs Map) bool {
+	return MapCompare(lhs, rhs) == -1
+}
+
+// MapCompare compares two maps lexically.
+func MapCompare(lhs, rhs Map) int {
+	return MapCompareUsing(HeapAllocator, lhs, rhs)
+}
+
+// MapCompareUsing uses the provided allocator and compares two maps lexically.
+func MapCompareUsing(a Allocator, lhs, rhs Map) int {
+	c := 0
+	var llength, rlength int
+	if lhs != nil {
+		llength = lhs.Length()
+	}
+	if rhs != nil {
+		rlength = rhs.Length()
+	}
+	if llength == 0 && rlength == 0 {
+		return 0
+	}
+	i := 0
+	MapZipUsing(a, lhs, rhs, LexicalKeyOrder, func(key string, lhs, rhs Value) bool {
+		switch {
+		case i == llength:
+			c = -1
+		case i == rlength:
+			c = 1
+		case lhs == nil:
+			c = 1
+		case rhs == nil:
+			c = -1
+		default:
+			c = CompareUsing(a, lhs, rhs)
+		}
+		i++
+		return c == 0
+	})
+	return c
+}
+
+// MapEquals returns true if lhs == rhs, false otherwise. This function
+// acts on generic types and should not be used by callers, but can help
+// implement Map.Equals.
+// WARN: This is a naive implementation, calling lhs.Equals(rhs) is typically the most efficient.
+func MapEquals(lhs, rhs Map) bool {
+	return MapEqualsUsing(HeapAllocator, lhs, rhs)
+}
+
+// MapEqualsUsing uses the provided allocator and returns true if lhs == rhs,
+// false otherwise. This function acts on generic types and should not be used
+// by callers, but can help implement Map.Equals.
+// WARN: This is a naive implementation, calling lhs.EqualsUsing(allocator, rhs) is typically the most efficient.
+func MapEqualsUsing(a Allocator, lhs, rhs Map) bool {
+	if lhs == nil && rhs == nil {
+		return true
+	}
+	if lhs == nil || rhs == nil {
+		return false
+	}
+	if lhs.Length() != rhs.Length() {
+		return false
+	}
+	return MapZipUsing(a, lhs, rhs, Unordered, func(key string, lhs, rhs Value) bool {
+		if lhs == nil || rhs == nil {
+			return false
+		}
+		return EqualsUsing(a, lhs, rhs)
+	})
+}
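
Illustrative sketch (not part of the vendored sources): MapZip visits the union
of keys and passes nil for the side that lacks a key. It is written as an
in-package test because mapUnstructuredString is unexported.

package value

import "testing"

func TestMapZipSketch(t *testing.T) {
	lhs := mapUnstructuredString{"a": int64(1), "b": int64(2)}
	rhs := mapUnstructuredString{"b": int64(2), "c": int64(3)}
	got := map[string][2]bool{}
	MapZip(lhs, rhs, LexicalKeyOrder, func(key string, l, r Value) bool {
		got[key] = [2]bool{l != nil, r != nil}
		return true // keep iterating
	})
	want := map[string][2]bool{"a": {true, false}, "b": {true, true}, "c": {false, true}}
	for k, v := range want {
		if got[k] != v {
			t.Fatalf("key %q: got %v, want %v", k, got[k], v)
		}
	}
	if len(got) != len(want) {
		t.Fatalf("unexpected keys: %v", got)
	}
}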
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapreflect.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapreflect.go
new file mode 100644
index 00000000..dc8b8c72
--- /dev/null
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapreflect.go
@@ -0,0 +1,209 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package value
+
+import (
+	"reflect"
+)
+
+type mapReflect struct {
+	valueReflect
+}
+
+func (r mapReflect) Length() int {
+	val := r.Value
+	return val.Len()
+}
+
+func (r mapReflect) Empty() bool {
+	val := r.Value
+	return val.Len() == 0
+}
+
+func (r mapReflect) Get(key string) (Value, bool) {
+	return r.GetUsing(HeapAllocator, key)
+}
+
+func (r mapReflect) GetUsing(a Allocator, key string) (Value, bool) {
+	k, v, ok := r.get(key)
+	if !ok {
+		return nil, false
+	}
+	return a.allocValueReflect().mustReuse(v, nil, &r.Value, &k), true
+}
+
+func (r mapReflect) get(k string) (key, value reflect.Value, ok bool) {
+	mapKey := r.toMapKey(k)
+	val := r.Value.MapIndex(mapKey)
+	return mapKey, val, val.IsValid() && val != reflect.Value{}
+}
+
+func (r mapReflect) Has(key string) bool {
+	var val reflect.Value
+	val = r.Value.MapIndex(r.toMapKey(key))
+	if !val.IsValid() {
+		return false
+	}
+	return val != reflect.Value{}
+}
+
+func (r mapReflect) Set(key string, val Value) {
+	r.Value.SetMapIndex(r.toMapKey(key), reflect.ValueOf(val.Unstructured()))
+}
+
+func (r mapReflect) Delete(key string) {
+	val := r.Value
+	val.SetMapIndex(r.toMapKey(key), reflect.Value{})
+}
+
+// TODO: Do we need to support types that implement json.Marshaler and are used as string keys?
+func (r mapReflect) toMapKey(key string) reflect.Value {
+	val := r.Value
+	return reflect.ValueOf(key).Convert(val.Type().Key())
+}
+
+func (r mapReflect) Iterate(fn func(string, Value) bool) bool {
+	return r.IterateUsing(HeapAllocator, fn)
+}
+
+func (r mapReflect) IterateUsing(a Allocator, fn func(string, Value) bool) bool {
+	if r.Value.Len() == 0 {
+		return true
+	}
+	v := a.allocValueReflect()
+	defer a.Free(v)
+	return eachMapEntry(r.Value, func(e *TypeReflectCacheEntry, key reflect.Value, value reflect.Value) bool {
+		return fn(key.String(), v.mustReuse(value, e, &r.Value, &key))
+	})
+}
+
+func eachMapEntry(val reflect.Value, fn func(*TypeReflectCacheEntry, reflect.Value, reflect.Value) bool) bool {
+	iter := val.MapRange()
+	entry := TypeReflectEntryOf(val.Type().Elem())
+	for iter.Next() {
+		next := iter.Value()
+		if !next.IsValid() {
+			continue
+		}
+		if !fn(entry, iter.Key(), next) {
+			return false
+		}
+	}
+	return true
+}
+
+func (r mapReflect) Unstructured() interface{} {
+	result := make(map[string]interface{}, r.Length())
+	r.Iterate(func(s string, value Value) bool {
+		result[s] = value.Unstructured()
+		return true
+	})
+	return result
+}
+
+func (r mapReflect) Equals(m Map) bool {
+	return r.EqualsUsing(HeapAllocator, m)
+}
+
+func (r mapReflect) EqualsUsing(a Allocator, m Map) bool {
+	lhsLength := r.Length()
+	rhsLength := m.Length()
+	if lhsLength != rhsLength {
+		return false
+	}
+	if lhsLength == 0 {
+		return true
+	}
+	vr := a.allocValueReflect()
+	defer a.Free(vr)
+	entry := TypeReflectEntryOf(r.Value.Type().Elem())
+	return m.Iterate(func(key string, value Value) bool {
+		_, lhsVal, ok := r.get(key)
+		if !ok {
+			return false
+		}
+		return Equals(vr.mustReuse(lhsVal, entry, nil, nil), value)
+	})
+}
+
+func (r mapReflect) Zip(other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool {
+	return r.ZipUsing(HeapAllocator, other, order, fn)
+}
+
+func (r mapReflect) ZipUsing(a Allocator, other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool {
+	if otherMapReflect, ok := other.(*mapReflect); ok && order == Unordered {
+		return r.unorderedReflectZip(a, otherMapReflect, fn)
+	}
+	return defaultMapZip(a, &r, other, order, fn)
+}
+
+// unorderedReflectZip provides an optimized unordered zip for mapReflect types.
+func (r mapReflect) unorderedReflectZip(a Allocator, other *mapReflect, fn func(key string, lhs, rhs Value) bool) bool {
+	if r.Empty() && (other == nil || other.Empty()) {
+		return true
+	}
+
+	lhs := r.Value
+	lhsEntry := TypeReflectEntryOf(lhs.Type().Elem())
+
+	// map lookup via reflection is expensive enough that it is better to keep track of visited keys
+	visited := map[string]struct{}{}
+
+	vlhs, vrhs := a.allocValueReflect(), a.allocValueReflect()
+	defer a.Free(vlhs)
+	defer a.Free(vrhs)
+
+	if other != nil {
+		rhs := other.Value
+		rhsEntry := TypeReflectEntryOf(rhs.Type().Elem())
+		iter := rhs.MapRange()
+
+		for iter.Next() {
+			key := iter.Key()
+			keyString := key.String()
+			next := iter.Value()
+			if !next.IsValid() {
+				continue
+			}
+			rhsVal := vrhs.mustReuse(next, rhsEntry, &rhs, &key)
+			visited[keyString] = struct{}{}
+			var lhsVal Value
+			if _, v, ok := r.get(keyString); ok {
+				lhsVal = vlhs.mustReuse(v, lhsEntry, &lhs, &key)
+			}
+			if !fn(keyString, lhsVal, rhsVal) {
+				return false
+			}
+		}
+	}
+
+	iter := lhs.MapRange()
+	for iter.Next() {
+		key := iter.Key()
+		if _, ok := visited[key.String()]; ok {
+			continue
+		}
+		next := iter.Value()
+		if !next.IsValid() {
+			continue
+		}
+		if !fn(key.String(), vlhs.mustReuse(next, lhsEntry, &lhs, &key), nil) {
+			return false
+		}
+	}
+	return true
+}
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapunstructured.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapunstructured.go
new file mode 100644
index 00000000..d8e20862
--- /dev/null
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapunstructured.go
@@ -0,0 +1,190 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package value
+
+type mapUnstructuredInterface map[interface{}]interface{}
+
+func (m mapUnstructuredInterface) Set(key string, val Value) {
+	m[key] = val.Unstructured()
+}
+
+func (m mapUnstructuredInterface) Get(key string) (Value, bool) {
+	return m.GetUsing(HeapAllocator, key)
+}
+
+func (m mapUnstructuredInterface) GetUsing(a Allocator, key string) (Value, bool) {
+	if v, ok := m[key]; !ok {
+		return nil, false
+	} else {
+		return a.allocValueUnstructured().reuse(v), true
+	}
+}
+
+func (m mapUnstructuredInterface) Has(key string) bool {
+	_, ok := m[key]
+	return ok
+}
+
+func (m mapUnstructuredInterface) Delete(key string) {
+	delete(m, key)
+}
+
+func (m mapUnstructuredInterface) Iterate(fn func(key string, value Value) bool) bool {
+	return m.IterateUsing(HeapAllocator, fn)
+}
+
+func (m mapUnstructuredInterface) IterateUsing(a Allocator, fn func(key string, value Value) bool) bool {
+	if len(m) == 0 {
+		return true
+	}
+	vv := a.allocValueUnstructured()
+	defer a.Free(vv)
+	for k, v := range m {
+		if ks, ok := k.(string); !ok {
+			continue
+		} else {
+			if !fn(ks, vv.reuse(v)) {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+func (m mapUnstructuredInterface) Length() int {
+	return len(m)
+}
+
+func (m mapUnstructuredInterface) Empty() bool {
+	return len(m) == 0
+}
+
+func (m mapUnstructuredInterface) Equals(other Map) bool {
+	return m.EqualsUsing(HeapAllocator, other)
+}
+
+func (m mapUnstructuredInterface) EqualsUsing(a Allocator, other Map) bool {
+	lhsLength := m.Length()
+	rhsLength := other.Length()
+	if lhsLength != rhsLength {
+		return false
+	}
+	if lhsLength == 0 {
+		return true
+	}
+	vv := a.allocValueUnstructured()
+	defer a.Free(vv)
+	return other.Iterate(func(key string, value Value) bool {
+		lhsVal, ok := m[key]
+		if !ok {
+			return false
+		}
+		return Equals(vv.reuse(lhsVal), value)
+	})
+}
+
+func (m mapUnstructuredInterface) Zip(other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool {
+	return m.ZipUsing(HeapAllocator, other, order, fn)
+}
+
+func (m mapUnstructuredInterface) ZipUsing(a Allocator, other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool {
+	return defaultMapZip(a, m, other, order, fn)
+}
+
+type mapUnstructuredString map[string]interface{}
+
+func (m mapUnstructuredString) Set(key string, val Value) {
+	m[key] = val.Unstructured()
+}
+
+func (m mapUnstructuredString) Get(key string) (Value, bool) {
+	return m.GetUsing(HeapAllocator, key)
+}
+func (m mapUnstructuredString) GetUsing(a Allocator, key string) (Value, bool) {
+	if v, ok := m[key]; !ok {
+		return nil, false
+	} else {
+		return a.allocValueUnstructured().reuse(v), true
+	}
+}
+
+func (m mapUnstructuredString) Has(key string) bool {
+	_, ok := m[key]
+	return ok
+}
+
+func (m mapUnstructuredString) Delete(key string) {
+	delete(m, key)
+}
+
+func (m mapUnstructuredString) Iterate(fn func(key string, value Value) bool) bool {
+	return m.IterateUsing(HeapAllocator, fn)
+}
+
+func (m mapUnstructuredString) IterateUsing(a Allocator, fn func(key string, value Value) bool) bool {
+	if len(m) == 0 {
+		return true
+	}
+	vv := a.allocValueUnstructured()
+	defer a.Free(vv)
+	for k, v := range m {
+		if !fn(k, vv.reuse(v)) {
+			return false
+		}
+	}
+	return true
+}
+
+func (m mapUnstructuredString) Length() int {
+	return len(m)
+}
+
+func (m mapUnstructuredString) Equals(other Map) bool {
+	return m.EqualsUsing(HeapAllocator, other)
+}
+
+func (m mapUnstructuredString) EqualsUsing(a Allocator, other Map) bool {
+	lhsLength := m.Length()
+	rhsLength := other.Length()
+	if lhsLength != rhsLength {
+		return false
+	}
+	if lhsLength == 0 {
+		return true
+	}
+	vv := a.allocValueUnstructured()
+	defer a.Free(vv)
+	return other.Iterate(func(key string, value Value) bool {
+		lhsVal, ok := m[key]
+		if !ok {
+			return false
+		}
+		return Equals(vv.reuse(lhsVal), value)
+	})
+}
+
+func (m mapUnstructuredString) Zip(other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool {
+	return m.ZipUsing(HeapAllocator, other, order, fn)
+}
+
+func (m mapUnstructuredString) ZipUsing(a Allocator, other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool {
+	return defaultMapZip(a, m, other, order, fn)
+}
+
+func (m mapUnstructuredString) Empty() bool {
+	return len(m) == 0
+}
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/reflectcache.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/reflectcache.go
new file mode 100644
index 00000000..49e6dd16
--- /dev/null
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/reflectcache.go
@@ -0,0 +1,463 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package value
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"sort"
+	"sync"
+	"sync/atomic"
+)
+
+// UnstructuredConverter defines how a type can be converted directly to unstructured.
+// Types that implement json.Marshaler may also optionally implement this interface to provide a more
+// direct and more efficient conversion. All types that choose to implement this interface must still
+// implement this same conversion via json.Marshaler.
+type UnstructuredConverter interface {
+	json.Marshaler // require that json.Marshaler is implemented
+
+	// ToUnstructured returns the unstructured representation.
+	ToUnstructured() interface{}
+}
+
+// TypeReflectCacheEntry keeps data gathered using reflection about how a type is converted to/from unstructured.
+type TypeReflectCacheEntry struct {
+	isJsonMarshaler        bool
+	ptrIsJsonMarshaler     bool
+	isJsonUnmarshaler      bool
+	ptrIsJsonUnmarshaler   bool
+	isStringConvertable    bool
+	ptrIsStringConvertable bool
+
+	structFields        map[string]*FieldCacheEntry
+	orderedStructFields []*FieldCacheEntry
+}
+
+// FieldCacheEntry keeps data gathered using reflection about how the field of a struct is converted to/from
+// unstructured.
+type FieldCacheEntry struct {
+	// JsonName is the name of the field according to the json tags on the struct field.
+	JsonName string
+	// isOmitEmpty is true if the field has the json 'omitempty' tag.
+	isOmitEmpty bool
+	// fieldPath is a list of field indices (see FieldByIndex) to look up the value of
+	// a field in a reflect.Value struct. The field indices in the list form a path used
+	// to traverse through intermediary 'inline' fields.
+	fieldPath [][]int
+
+	fieldType reflect.Type
+	TypeEntry *TypeReflectCacheEntry
+}
+
+func (f *FieldCacheEntry) CanOmit(fieldVal reflect.Value) bool {
+	return f.isOmitEmpty && (safeIsNil(fieldVal) || isZero(fieldVal))
+}
+
+// GetFrom returns the field identified by this FieldCacheEntry from the provided struct.
+func (f *FieldCacheEntry) GetFrom(structVal reflect.Value) reflect.Value {
+	// field might be nested within 'inline' structs
+	for _, elem := range f.fieldPath {
+		structVal = structVal.FieldByIndex(elem)
+	}
+	return structVal
+}
+
+var marshalerType = reflect.TypeOf(new(json.Marshaler)).Elem()
+var unmarshalerType = reflect.TypeOf(new(json.Unmarshaler)).Elem()
+var unstructuredConvertableType = reflect.TypeOf(new(UnstructuredConverter)).Elem()
+var defaultReflectCache = newReflectCache()
+
+// TypeReflectEntryOf returns the TypeReflectCacheEntry of the provided reflect.Type.
+func TypeReflectEntryOf(t reflect.Type) *TypeReflectCacheEntry {
+	cm := defaultReflectCache.get()
+	if record, ok := cm[t]; ok {
+		return record
+	}
+	updates := reflectCacheMap{}
+	result := typeReflectEntryOf(cm, t, updates)
+	if len(updates) > 0 {
+		defaultReflectCache.update(updates)
+	}
+	return result
+}
+
+// typeReflectEntryOf returns all updates needed to add the provided reflect.Type, and the types its fields transitively
+// depend on, to the cache.
+func typeReflectEntryOf(cm reflectCacheMap, t reflect.Type, updates reflectCacheMap) *TypeReflectCacheEntry {
+	if record, ok := cm[t]; ok {
+		return record
+	}
+	if record, ok := updates[t]; ok {
+		return record
+	}
+	typeEntry := &TypeReflectCacheEntry{
+		isJsonMarshaler:        t.Implements(marshalerType),
+		ptrIsJsonMarshaler:     reflect.PtrTo(t).Implements(marshalerType),
+		isJsonUnmarshaler:      reflect.PtrTo(t).Implements(unmarshalerType),
+		isStringConvertable:    t.Implements(unstructuredConvertableType),
+		ptrIsStringConvertable: reflect.PtrTo(t).Implements(unstructuredConvertableType),
+	}
+	if t.Kind() == reflect.Struct {
+		fieldEntries := map[string]*FieldCacheEntry{}
+		buildStructCacheEntry(t, fieldEntries, nil)
+		typeEntry.structFields = fieldEntries
+		sortedByJsonName := make([]*FieldCacheEntry, len(fieldEntries))
+		i := 0
+		for _, entry := range fieldEntries {
+			sortedByJsonName[i] = entry
+			i++
+		}
+		sort.Slice(sortedByJsonName, func(i, j int) bool {
+			return sortedByJsonName[i].JsonName < sortedByJsonName[j].JsonName
+		})
+		typeEntry.orderedStructFields = sortedByJsonName
+	}
+
+	// cyclic type references are allowed, so we must add the typeEntry to the updates map before resolving
+	// the field.typeEntry references, or creating them if they are not already in the cache
+	updates[t] = typeEntry
+
+	for _, field := range typeEntry.structFields {
+		if field.TypeEntry == nil {
+			field.TypeEntry = typeReflectEntryOf(cm, field.fieldType, updates)
+		}
+	}
+	return typeEntry
+}
+
+func buildStructCacheEntry(t reflect.Type, infos map[string]*FieldCacheEntry, fieldPath [][]int) {
+	for i := 0; i < t.NumField(); i++ {
+		field := t.Field(i)
+		jsonName, omit, isInline, isOmitempty := lookupJsonTags(field)
+		if omit {
+			continue
+		}
+		if isInline {
+			buildStructCacheEntry(field.Type, infos, append(fieldPath, field.Index))
+			continue
+		}
+		info := &FieldCacheEntry{JsonName: jsonName, isOmitEmpty: isOmitempty, fieldPath: append(fieldPath, field.Index), fieldType: field.Type}
+		infos[jsonName] = info
+	}
+}
+
+// Fields returns a map of JSON field name to FieldCacheEntry for structs, or nil for non-structs.
+func (e TypeReflectCacheEntry) Fields() map[string]*FieldCacheEntry {
+	return e.structFields
+}
+
+// OrderedFields returns a slice of FieldCacheEntry for structs, sorted by JSON field name, or nil for non-structs.
+func (e TypeReflectCacheEntry) OrderedFields() []*FieldCacheEntry {
+	return e.orderedStructFields
+}
+
+// CanConvertToUnstructured returns true if this TypeReflectCacheEntry can convert values of its type to unstructured.
+func (e TypeReflectCacheEntry) CanConvertToUnstructured() bool {
+	return e.isJsonMarshaler || e.ptrIsJsonMarshaler || e.isStringConvertable || e.ptrIsStringConvertable
+}
+
+// ToUnstructured converts the provided value to unstructured and returns it.
+func (e TypeReflectCacheEntry) ToUnstructured(sv reflect.Value) (interface{}, error) {
+	// This is based on https://github.com/kubernetes/kubernetes/blob/82c9e5c814eb7acc6cc0a090c057294d0667ad66/staging/src/k8s.io/apimachinery/pkg/runtime/converter.go#L505
+	// and is intended to replace it.
+
+	// Check if the object has a custom string converter and use it if available, since it is much more efficient
+	// than round tripping through json.
+	if converter, ok := e.getUnstructuredConverter(sv); ok {
+		return converter.ToUnstructured(), nil
+	}
+	// Check if the object has a custom JSON marshaller/unmarshaller.
+	if marshaler, ok := e.getJsonMarshaler(sv); ok {
+		if sv.Kind() == reflect.Ptr && sv.IsNil() {
+			// We're done - we don't need to store anything.
+			return nil, nil
+		}
+
+		data, err := marshaler.MarshalJSON()
+		if err != nil {
+			return nil, err
+		}
+		switch {
+		case len(data) == 0:
+			return nil, fmt.Errorf("error decoding from json: empty value")
+
+		case bytes.Equal(data, nullBytes):
+			// We're done - we don't need to store anything.
+			return nil, nil
+
+		case bytes.Equal(data, trueBytes):
+			return true, nil
+
+		case bytes.Equal(data, falseBytes):
+			return false, nil
+
+		case data[0] == '"':
+			var result string
+			err := unmarshal(data, &result)
+			if err != nil {
+				return nil, fmt.Errorf("error decoding string from json: %v", err)
+			}
+			return result, nil
+
+		case data[0] == '{':
+			result := make(map[string]interface{})
+			err := unmarshal(data, &result)
+			if err != nil {
+				return nil, fmt.Errorf("error decoding object from json: %v", err)
+			}
+			return result, nil
+
+		case data[0] == '[':
+			result := make([]interface{}, 0)
+			err := unmarshal(data, &result)
+			if err != nil {
+				return nil, fmt.Errorf("error decoding array from json: %v", err)
+			}
+			return result, nil
+
+		default:
+			var (
+				resultInt   int64
+				resultFloat float64
+				err         error
+			)
+			if err = unmarshal(data, &resultInt); err == nil {
+				return resultInt, nil
+			} else if err = unmarshal(data, &resultFloat); err == nil {
+				return resultFloat, nil
+			} else {
+				return nil, fmt.Errorf("error decoding number from json: %v", err)
+			}
+		}
+	}
+
+	return nil, fmt.Errorf("provided type cannot be converted: %v", sv.Type())
+}
+
+// CanConvertFromUnstructured returns true if this TypeReflectCacheEntry can convert objects of the type from unstructured.
+func (e TypeReflectCacheEntry) CanConvertFromUnstructured() bool {
+	return e.isJsonUnmarshaler
+}
+
+// FromUnstructured converts the provided source value from unstructured into the provided destination value.
+func (e TypeReflectCacheEntry) FromUnstructured(sv, dv reflect.Value) error {
+	// TODO: this could be made much more efficient using direct conversions like
+	// UnstructuredConverter.ToUnstructured provides.
+	st := dv.Type()
+	data, err := json.Marshal(sv.Interface())
+	if err != nil {
+		return fmt.Errorf("error encoding %s to json: %v", st.String(), err)
+	}
+	if unmarshaler, ok := e.getJsonUnmarshaler(dv); ok {
+		return unmarshaler.UnmarshalJSON(data)
+	}
+	return fmt.Errorf("unable to unmarshal %v into %v", sv.Type(), dv.Type())
+}
+
+var (
+	nullBytes  = []byte("null")
+	trueBytes  = []byte("true")
+	falseBytes = []byte("false")
+)
+
+func (e TypeReflectCacheEntry) getJsonMarshaler(v reflect.Value) (json.Marshaler, bool) {
+	if e.isJsonMarshaler {
+		return v.Interface().(json.Marshaler), true
+	}
+	if e.ptrIsJsonMarshaler {
+		// Check pointer receivers if v is not a pointer
+		if v.Kind() != reflect.Ptr && v.CanAddr() {
+			v = v.Addr()
+			return v.Interface().(json.Marshaler), true
+		}
+	}
+	return nil, false
+}
+
+func (e TypeReflectCacheEntry) getJsonUnmarshaler(v reflect.Value) (json.Unmarshaler, bool) {
+	if !e.isJsonUnmarshaler {
+		return nil, false
+	}
+	return v.Addr().Interface().(json.Unmarshaler), true
+}
+
+func (e TypeReflectCacheEntry) getUnstructuredConverter(v reflect.Value) (UnstructuredConverter, bool) {
+	if e.isStringConvertable {
+		return v.Interface().(UnstructuredConverter), true
+	}
+	if e.ptrIsStringConvertable {
+		// Check pointer receivers if v is not a pointer
+		if v.CanAddr() {
+			v = v.Addr()
+			return v.Interface().(UnstructuredConverter), true
+		}
+	}
+	return nil, false
+}
+
+type typeReflectCache struct {
+	// use an atomic and copy-on-write since there are a fixed (typically very small) number of structs compiled into any
+	// go program using this cache
+	value atomic.Value
+	// mu is held by writers when performing load/modify/store operations on the cache, readers do not need to hold a
+	// read-lock since the atomic value is always read-only
+	mu sync.Mutex
+}
+
+func newReflectCache() *typeReflectCache {
+	cache := &typeReflectCache{}
+	cache.value.Store(make(reflectCacheMap))
+	return cache
+}
+
+type reflectCacheMap map[reflect.Type]*TypeReflectCacheEntry
+
+// get returns the reflectCacheMap.
+func (c *typeReflectCache) get() reflectCacheMap {
+	return c.value.Load().(reflectCacheMap)
+}
+
+// update merges the provided updates into the cache.
+func (c *typeReflectCache) update(updates reflectCacheMap) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	currentCacheMap := c.value.Load().(reflectCacheMap)
+
+	hasNewEntries := false
+	for t := range updates {
+		if _, ok := currentCacheMap[t]; !ok {
+			hasNewEntries = true
+			break
+		}
+	}
+	if !hasNewEntries {
+		// Bail if the updates have been set while waiting for lock acquisition.
+		// This is safe since setting entries is idempotent.
+		return
+	}
+
+	newCacheMap := make(reflectCacheMap, len(currentCacheMap)+len(updates))
+	for k, v := range currentCacheMap {
+		newCacheMap[k] = v
+	}
+	for t, update := range updates {
+		newCacheMap[t] = update
+	}
+	c.value.Store(newCacheMap)
+}
+
+// The json unmarshal helpers below are from k8s.io/apimachinery/pkg/util/json
+// and handle number conversions as expected by Kubernetes
+
+// limit recursive depth to prevent stack overflow errors
+const maxDepth = 10000
+
+// unmarshal unmarshals the given data.
+// If v is a *map[string]interface{} or *[]interface{}, numbers are converted to int64 or float64.
+func unmarshal(data []byte, v interface{}) error {
+	switch v := v.(type) {
+	case *map[string]interface{}:
+		// Build a decoder from the given data
+		decoder := json.NewDecoder(bytes.NewBuffer(data))
+		// Preserve numbers, rather than casting to float64 automatically
+		decoder.UseNumber()
+		// Run the decode
+		if err := decoder.Decode(v); err != nil {
+			return err
+		}
+		// If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64
+		return convertMapNumbers(*v, 0)
+
+	case *[]interface{}:
+		// Build a decoder from the given data
+		decoder := json.NewDecoder(bytes.NewBuffer(data))
+		// Preserve numbers, rather than casting to float64 automatically
+		decoder.UseNumber()
+		// Run the decode
+		if err := decoder.Decode(v); err != nil {
+			return err
+		}
+		// If the decode succeeds, post-process the slice to convert json.Number objects to int64 or float64
+		return convertSliceNumbers(*v, 0)
+
+	default:
+		return json.Unmarshal(data, v)
+	}
+}
+
+// convertMapNumbers traverses the map, converting any json.Number values to int64 or float64.
+// values which are map[string]interface{} or []interface{} are recursively visited
+func convertMapNumbers(m map[string]interface{}, depth int) error {
+	if depth > maxDepth {
+		return fmt.Errorf("exceeded max depth of %d", maxDepth)
+	}
+
+	var err error
+	for k, v := range m {
+		switch v := v.(type) {
+		case json.Number:
+			m[k], err = convertNumber(v)
+		case map[string]interface{}:
+			err = convertMapNumbers(v, depth+1)
+		case []interface{}:
+			err = convertSliceNumbers(v, depth+1)
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// convertSliceNumbers traverses the slice, converting any json.Number values to int64 or float64.
+// values which are map[string]interface{} or []interface{} are recursively visited
+func convertSliceNumbers(s []interface{}, depth int) error {
+	if depth > maxDepth {
+		return fmt.Errorf("exceeded max depth of %d", maxDepth)
+	}
+
+	var err error
+	for i, v := range s {
+		switch v := v.(type) {
+		case json.Number:
+			s[i], err = convertNumber(v)
+		case map[string]interface{}:
+			err = convertMapNumbers(v, depth+1)
+		case []interface{}:
+			err = convertSliceNumbers(v, depth+1)
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// convertNumber converts a json.Number to an int64 or float64, or returns an error
+func convertNumber(n json.Number) (interface{}, error) {
+	// Attempt to convert to an int64 first
+	if i, err := n.Int64(); err == nil {
+		return i, nil
+	}
+	// Return a float64 (default json.Decode() behavior)
+	// An overflow will return an error
+	return n.Float64()
+}
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/scalar.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/scalar.go
new file mode 100644
index 00000000..c78a4c18
--- /dev/null
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/scalar.go
@@ -0,0 +1,50 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package value
+
+// FloatCompare compares floats. The result will be 0 if lhs==rhs, -1 if lhs <
+// rhs, and +1 if lhs > rhs.
+func FloatCompare(lhs, rhs float64) int {
+	if lhs > rhs {
+		return 1
+	} else if lhs < rhs {
+		return -1
+	}
+	return 0
+}
+
+// IntCompare compares integers. The result will be 0 if lhs==rhs, -1 if lhs <
+// rhs, and +1 if lhs > rhs.
+func IntCompare(lhs, rhs int64) int {
+	if lhs > rhs {
+		return 1
+	} else if lhs < rhs {
+		return -1
+	}
+	return 0
+}
+
+// BoolCompare compares booleans. The result will be 0 if lhs==rhs, -1 if lhs <
+// rhs, and +1 if lhs > rhs.
+func BoolCompare(lhs, rhs bool) int {
+	if lhs == rhs {
+		return 0
+	} else if lhs == false {
+		return -1
+	}
+	return 1
+}
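
As a quick illustration (not part of the vendored file), the exported compare helpers above return -1/0/+1 and compose directly with sort.Slice:

package main

import (
	"fmt"
	"sort"

	"sigs.k8s.io/structured-merge-diff/v3/value"
)

func main() {
	xs := []int64{5, -2, 9}
	sort.Slice(xs, func(i, j int) bool { return value.IntCompare(xs[i], xs[j]) < 0 })
	fmt.Println(xs)                             // [-2 5 9]
	fmt.Println(value.BoolCompare(false, true)) // -1: false orders before true
	fmt.Println(value.FloatCompare(1.5, 1.5))   // 0
}
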
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/structreflect.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/structreflect.go
new file mode 100644
index 00000000..4a7bb5c6
--- /dev/null
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/structreflect.go
@@ -0,0 +1,208 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package value
+
+import (
+	"fmt"
+	"reflect"
+)
+
+type structReflect struct {
+	valueReflect
+}
+
+func (r structReflect) Length() int {
+	i := 0
+	eachStructField(r.Value, func(_ *TypeReflectCacheEntry, s string, value reflect.Value) bool {
+		i++
+		return true
+	})
+	return i
+}
+
+func (r structReflect) Empty() bool {
+	return eachStructField(r.Value, func(_ *TypeReflectCacheEntry, s string, value reflect.Value) bool {
+		return false // exit early if the struct is non-empty
+	})
+}
+
+func (r structReflect) Get(key string) (Value, bool) {
+	return r.GetUsing(HeapAllocator, key)
+}
+
+func (r structReflect) GetUsing(a Allocator, key string) (Value, bool) {
+	if val, ok := r.findJsonNameField(key); ok {
+		return a.allocValueReflect().mustReuse(val, nil, nil, nil), true
+	}
+	return nil, false
+}
+
+func (r structReflect) Has(key string) bool {
+	_, ok := r.findJsonNameField(key)
+	return ok
+}
+
+func (r structReflect) Set(key string, val Value) {
+	fieldEntry, ok := TypeReflectEntryOf(r.Value.Type()).Fields()[key]
+	if !ok {
+		panic(fmt.Sprintf("key %s may not be set on struct %T: field does not exist", key, r.Value.Interface()))
+	}
+	oldVal := fieldEntry.GetFrom(r.Value)
+	newVal := reflect.ValueOf(val.Unstructured())
+	r.update(fieldEntry, key, oldVal, newVal)
+}
+
+func (r structReflect) Delete(key string) {
+	fieldEntry, ok := TypeReflectEntryOf(r.Value.Type()).Fields()[key]
+	if !ok {
+		panic(fmt.Sprintf("key %s may not be deleted on struct %T: field does not exist", key, r.Value.Interface()))
+	}
+	oldVal := fieldEntry.GetFrom(r.Value)
+	if oldVal.Kind() != reflect.Ptr && !fieldEntry.isOmitEmpty {
+		panic(fmt.Sprintf("key %s may not be deleted on struct: %T: value is neither a pointer nor an omitempty field", key, r.Value.Interface()))
+	}
+	r.update(fieldEntry, key, oldVal, reflect.Zero(oldVal.Type()))
+}
+
+func (r structReflect) update(fieldEntry *FieldCacheEntry, key string, oldVal, newVal reflect.Value) {
+	if oldVal.CanSet() {
+		oldVal.Set(newVal)
+		return
+	}
+
+	// map items are not addressable, so if a struct is contained in a map, the only way to modify it is
+	// to write a replacement fieldEntry into the map.
+	if r.ParentMap != nil {
+		if r.ParentMapKey == nil {
+			panic("ParentMapKey must not be nil if ParentMap is not nil")
+		}
+		replacement := reflect.New(r.Value.Type()).Elem()
+		fieldEntry.GetFrom(replacement).Set(newVal)
+		r.ParentMap.SetMapIndex(*r.ParentMapKey, replacement)
+		return
+	}
+
+	// This should never happen since NewValueReflect ensures that the root object reflected on is a pointer and map
+	// item replacement is handled above.
+	panic(fmt.Sprintf("key %s may not be modified on struct: %T: struct is not settable", key, r.Value.Interface()))
+}
+
+func (r structReflect) Iterate(fn func(string, Value) bool) bool {
+	return r.IterateUsing(HeapAllocator, fn)
+}
+
+func (r structReflect) IterateUsing(a Allocator, fn func(string, Value) bool) bool {
+	vr := a.allocValueReflect()
+	defer a.Free(vr)
+	return eachStructField(r.Value, func(e *TypeReflectCacheEntry, s string, value reflect.Value) bool {
+		return fn(s, vr.mustReuse(value, e, nil, nil))
+	})
+}
+
+func eachStructField(structVal reflect.Value, fn func(*TypeReflectCacheEntry, string, reflect.Value) bool) bool {
+	for _, fieldCacheEntry := range TypeReflectEntryOf(structVal.Type()).OrderedFields() {
+		fieldVal := fieldCacheEntry.GetFrom(structVal)
+		if fieldCacheEntry.CanOmit(fieldVal) {
+			// omit it
+			continue
+		}
+		ok := fn(fieldCacheEntry.TypeEntry, fieldCacheEntry.JsonName, fieldVal)
+		if !ok {
+			return false
+		}
+	}
+	return true
+}
+
+func (r structReflect) Unstructured() interface{} {
+	// Use the number of struct fields as a cheap way to roughly estimate the map size
+	result := make(map[string]interface{}, r.Value.NumField())
+	r.Iterate(func(s string, value Value) bool {
+		result[s] = value.Unstructured()
+		return true
+	})
+	return result
+}
+
+func (r structReflect) Equals(m Map) bool {
+	return r.EqualsUsing(HeapAllocator, m)
+}
+
+func (r structReflect) EqualsUsing(a Allocator, m Map) bool {
+	// MapEquals uses zip and is fairly efficient for structReflect
+	return MapEqualsUsing(a, &r, m)
+}
+
+func (r structReflect) findJsonNameFieldAndNotEmpty(jsonName string) (reflect.Value, bool) {
+	structCacheEntry, ok := TypeReflectEntryOf(r.Value.Type()).Fields()[jsonName]
+	if !ok {
+		return reflect.Value{}, false
+	}
+	fieldVal := structCacheEntry.GetFrom(r.Value)
+	return fieldVal, !structCacheEntry.CanOmit(fieldVal)
+}
+
+func (r structReflect) findJsonNameField(jsonName string) (val reflect.Value, ok bool) {
+	structCacheEntry, ok := TypeReflectEntryOf(r.Value.Type()).Fields()[jsonName]
+	if !ok {
+		return reflect.Value{}, false
+	}
+	fieldVal := structCacheEntry.GetFrom(r.Value)
+	return fieldVal, !structCacheEntry.CanOmit(fieldVal)
+}
+
+func (r structReflect) Zip(other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool {
+	return r.ZipUsing(HeapAllocator, other, order, fn)
+}
+
+func (r structReflect) ZipUsing(a Allocator, other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool {
+	if otherStruct, ok := other.(*structReflect); ok && r.Value.Type() == otherStruct.Value.Type() {
+		lhsvr, rhsvr := a.allocValueReflect(), a.allocValueReflect()
+		defer a.Free(lhsvr)
+		defer a.Free(rhsvr)
+		return r.structZip(otherStruct, lhsvr, rhsvr, fn)
+	}
+	return defaultMapZip(a, &r, other, order, fn)
+}
+
+// structZip provides an optimized zip for structReflect types. The zip is always lexical key ordered since there is
+// no additional cost to ordering the zip for structured types.
+func (r structReflect) structZip(other *structReflect, lhsvr, rhsvr *valueReflect, fn func(key string, lhs, rhs Value) bool) bool {
+	lhsVal := r.Value
+	rhsVal := other.Value
+
+	for _, fieldCacheEntry := range TypeReflectEntryOf(lhsVal.Type()).OrderedFields() {
+		lhsFieldVal := fieldCacheEntry.GetFrom(lhsVal)
+		rhsFieldVal := fieldCacheEntry.GetFrom(rhsVal)
+		lhsOmit := fieldCacheEntry.CanOmit(lhsFieldVal)
+		rhsOmit := fieldCacheEntry.CanOmit(rhsFieldVal)
+		if lhsOmit && rhsOmit {
+			continue
+		}
+		var lhsVal, rhsVal Value
+		if !lhsOmit {
+			lhsVal = lhsvr.mustReuse(lhsFieldVal, fieldCacheEntry.TypeEntry, nil, nil)
+		}
+		if !rhsOmit {
+			rhsVal = rhsvr.mustReuse(rhsFieldVal, fieldCacheEntry.TypeEntry, nil, nil)
+		}
+		if !fn(fieldCacheEntry.JsonName, lhsVal, rhsVal) {
+			return false
+		}
+	}
+	return true
+}
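
A hypothetical usage sketch for the struct-backed Map above: in practice it is reached through NewValueReflect (added later in this patch), and fields are visited in JSON-name order with omitempty zero values skipped. The Pod type here is illustrative only:

package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v3/value"
)

// Pod is an illustrative struct, not a real Kubernetes type.
type Pod struct {
	Name     string `json:"name"`
	Replicas int    `json:"replicas,omitempty"`
}

func main() {
	v, err := value.NewValueReflect(&Pod{Name: "web"})
	if err != nil {
		panic(err)
	}
	// Replicas is zero and tagged omitempty, so only "name" is visited.
	v.AsMap().Iterate(func(k string, field value.Value) bool {
		fmt.Printf("%s=%s\n", k, value.ToString(field)) // name="web"
		return true
	})
}
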
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/value.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/value.go
new file mode 100644
index 00000000..ea79e3a0
--- /dev/null
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/value.go
@@ -0,0 +1,347 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package value
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"strings"
+
+	jsoniter "github.com/json-iterator/go"
+	"gopkg.in/yaml.v2"
+)
+
+var (
+	readPool  = jsoniter.NewIterator(jsoniter.ConfigCompatibleWithStandardLibrary).Pool()
+	writePool = jsoniter.NewStream(jsoniter.ConfigCompatibleWithStandardLibrary, nil, 1024).Pool()
+)
+
+// A Value corresponds to an 'atom' in the schema. It should return true
+// for at least one of the IsXXX methods below, or the value is
+// considered "invalid"
+type Value interface {
+	// IsMap returns true if the Value is a Map, false otherwise.
+	IsMap() bool
+	// IsList returns true if the Value is a List, false otherwise.
+	IsList() bool
+	// IsBool returns true if the Value is a bool, false otherwise.
+	IsBool() bool
+	// IsInt returns true if the Value is an int64, false otherwise.
+	IsInt() bool
+	// IsFloat returns true if the Value is a float64, false
+	// otherwise.
+	IsFloat() bool
+	// IsString returns true if the Value is a string, false
+	// otherwise.
+	IsString() bool
+	// IsNull returns true if the Value is null, false otherwise.
+	IsNull() bool
+
+	// AsMap converts the Value into a Map (or panic if the type
+	// doesn't allow it).
+	AsMap() Map
+	// AsMapUsing uses the provided allocator and converts the Value
+	// into a Map (or panic if the type doesn't allow it).
+	AsMapUsing(Allocator) Map
+	// AsList converts the Value into a List (or panic if the type
+	// doesn't allow it).
+	AsList() List
+	// AsListUsing uses the provided allocator and converts the Value
+	// into a List (or panic if the type doesn't allow it).
+	AsListUsing(Allocator) List
+	// AsBool converts the Value into a bool (or panic if the type
+	// doesn't allow it).
+	AsBool() bool
+	// AsInt converts the Value into an int64 (or panic if the type
+	// doesn't allow it).
+	AsInt() int64
+	// AsFloat converts the Value into a float64 (or panic if the type
+	// doesn't allow it).
+	AsFloat() float64
+	// AsString converts the Value into a string (or panic if the type
+	// doesn't allow it).
+	AsString() string
+
+	// Unstructured converts the Value into an Unstructured interface{}.
+	Unstructured() interface{}
+}
+
+// FromJSON is a helper function for reading a JSON document.
+func FromJSON(input []byte) (Value, error) {
+	return FromJSONFast(input)
+}
+
+// FromJSONFast is a helper function for reading a JSON document.
+func FromJSONFast(input []byte) (Value, error) {
+	iter := readPool.BorrowIterator(input)
+	defer readPool.ReturnIterator(iter)
+	return ReadJSONIter(iter)
+}
+
+// ToJSON is a helper function for producing a JSON document.
+func ToJSON(v Value) ([]byte, error) {
+	buf := bytes.Buffer{}
+	stream := writePool.BorrowStream(&buf)
+	defer writePool.ReturnStream(stream)
+	WriteJSONStream(v, stream)
+	b := stream.Buffer()
+	err := stream.Flush()
+	// Help jsoniter manage its buffers--without this, the next
+	// use of the stream is likely to require an allocation. Look
+	// at the jsoniter stream code to understand why. They were probably
+	// optimizing for folks using the buffer directly.
+	stream.SetBuffer(b[:0])
+	return buf.Bytes(), err
+}
+
+// ReadJSONIter reads a Value from a JSON iterator.
+func ReadJSONIter(iter *jsoniter.Iterator) (Value, error) {
+	v := iter.Read()
+	if iter.Error != nil && iter.Error != io.EOF {
+		return nil, iter.Error
+	}
+	return NewValueInterface(v), nil
+}
+
+// WriteJSONStream writes a value into a JSON stream.
+func WriteJSONStream(v Value, stream *jsoniter.Stream) {
+	stream.WriteVal(v.Unstructured())
+}
+
+// ToYAML marshals a value as YAML.
+func ToYAML(v Value) ([]byte, error) {
+	return yaml.Marshal(v.Unstructured())
+}
+
+// Equals returns true iff the two values are equal.
+func Equals(lhs, rhs Value) bool {
+	return EqualsUsing(HeapAllocator, lhs, rhs)
+}
+
+// EqualsUsing uses the provided allocator and returns true iff the two values are equal.
+func EqualsUsing(a Allocator, lhs, rhs Value) bool {
+	if lhs.IsFloat() || rhs.IsFloat() {
+		var lf float64
+		if lhs.IsFloat() {
+			lf = lhs.AsFloat()
+		} else if lhs.IsInt() {
+			lf = float64(lhs.AsInt())
+		} else {
+			return false
+		}
+		var rf float64
+		if rhs.IsFloat() {
+			rf = rhs.AsFloat()
+		} else if rhs.IsInt() {
+			rf = float64(rhs.AsInt())
+		} else {
+			return false
+		}
+		return lf == rf
+	}
+	if lhs.IsInt() {
+		if rhs.IsInt() {
+			return lhs.AsInt() == rhs.AsInt()
+		}
+		return false
+	} else if rhs.IsInt() {
+		return false
+	}
+	if lhs.IsString() {
+		if rhs.IsString() {
+			return lhs.AsString() == rhs.AsString()
+		}
+		return false
+	} else if rhs.IsString() {
+		return false
+	}
+	if lhs.IsBool() {
+		if rhs.IsBool() {
+			return lhs.AsBool() == rhs.AsBool()
+		}
+		return false
+	} else if rhs.IsBool() {
+		return false
+	}
+	if lhs.IsList() {
+		if rhs.IsList() {
+			lhsList := lhs.AsListUsing(a)
+			defer a.Free(lhsList)
+			rhsList := rhs.AsListUsing(a)
+			defer a.Free(rhsList)
+			return lhsList.EqualsUsing(a, rhsList)
+		}
+		return false
+	} else if rhs.IsList() {
+		return false
+	}
+	if lhs.IsMap() {
+		if rhs.IsMap() {
+			lhsList := lhs.AsMapUsing(a)
+			defer a.Free(lhsList)
+			rhsList := rhs.AsMapUsing(a)
+			defer a.Free(rhsList)
+			return lhsList.EqualsUsing(a, rhsList)
+		}
+		return false
+	} else if rhs.IsMap() {
+		return false
+	}
+	if lhs.IsNull() {
+		if rhs.IsNull() {
+			return true
+		}
+		return false
+	} else if rhs.IsNull() {
+		return false
+	}
+	// No field is set on either object.
+	return true
+}
+
+// ToString returns a human-readable representation of the value.
+func ToString(v Value) string {
+	if v.IsNull() {
+		return "null"
+	}
+	switch {
+	case v.IsFloat():
+		return fmt.Sprintf("%v", v.AsFloat())
+	case v.IsInt():
+		return fmt.Sprintf("%v", v.AsInt())
+	case v.IsString():
+		return fmt.Sprintf("%q", v.AsString())
+	case v.IsBool():
+		return fmt.Sprintf("%v", v.AsBool())
+	case v.IsList():
+		strs := []string{}
+		list := v.AsList()
+		for i := 0; i < list.Length(); i++ {
+			strs = append(strs, ToString(list.At(i)))
+		}
+		return "[" + strings.Join(strs, ",") + "]"
+	case v.IsMap():
+		strs := []string{}
+		v.AsMap().Iterate(func(k string, v Value) bool {
+			strs = append(strs, fmt.Sprintf("%v=%v", k, ToString(v)))
+			return true
+		})
+		return strings.Join(strs, "")
+	}
+	// No field is set; the value is invalid.
+	return "{{undefined}}"
+}
+
+// Less provides a total ordering for Value (so that they can be sorted, even
+// if they are of different types).
+func Less(lhs, rhs Value) bool {
+	return Compare(lhs, rhs) == -1
+}
+
+// Compare provides a total ordering for Value (so that they can be
+// sorted, even if they are of different types). The result will be 0 if
+// lhs==rhs, -1 if lhs < rhs, and +1 if lhs > rhs.
+func Compare(lhs, rhs Value) int {
+	return CompareUsing(HeapAllocator, lhs, rhs)
+}
+
+// CompareUsing uses the provided allocator and provides a total
+// ordering for Value (so that they can be sorted, even if they
+// are of different types). The result will be 0 if lhs==rhs, -1
+// if lhs < rhs, and +1 if lhs > rhs.
+func CompareUsing(a Allocator, lhs, rhs Value) int {
+	if lhs.IsFloat() {
+		if !rhs.IsFloat() {
+			// Extra: compare floats and ints numerically.
+			if rhs.IsInt() {
+				return FloatCompare(lhs.AsFloat(), float64(rhs.AsInt()))
+			}
+			return -1
+		}
+		return FloatCompare(lhs.AsFloat(), rhs.AsFloat())
+	} else if rhs.IsFloat() {
+		// Extra: compare floats and ints numerically.
+		if lhs.IsInt() {
+			return FloatCompare(float64(lhs.AsInt()), rhs.AsFloat())
+		}
+		return 1
+	}
+
+	if lhs.IsInt() {
+		if !rhs.IsInt() {
+			return -1
+		}
+		return IntCompare(lhs.AsInt(), rhs.AsInt())
+	} else if rhs.IsInt() {
+		return 1
+	}
+
+	if lhs.IsString() {
+		if !rhs.IsString() {
+			return -1
+		}
+		return strings.Compare(lhs.AsString(), rhs.AsString())
+	} else if rhs.IsString() {
+		return 1
+	}
+
+	if lhs.IsBool() {
+		if !rhs.IsBool() {
+			return -1
+		}
+		return BoolCompare(lhs.AsBool(), rhs.AsBool())
+	} else if rhs.IsBool() {
+		return 1
+	}
+
+	if lhs.IsList() {
+		if !rhs.IsList() {
+			return -1
+		}
+		lhsList := lhs.AsListUsing(a)
+		defer a.Free(lhsList)
+		rhsList := rhs.AsListUsing(a)
+		defer a.Free(rhsList)
+		return ListCompareUsing(a, lhsList, rhsList)
+	} else if rhs.IsList() {
+		return 1
+	}
+	if lhs.IsMap() {
+		if !rhs.IsMap() {
+			return -1
+		}
+		lhsMap := lhs.AsMapUsing(a)
+		defer a.Free(lhsMap)
+		rhsMap := rhs.AsMapUsing(a)
+		defer a.Free(rhsMap)
+		return MapCompareUsing(a, lhsMap, rhsMap)
+	} else if rhs.IsMap() {
+		return 1
+	}
+	if lhs.IsNull() {
+		if !rhs.IsNull() {
+			return -1
+		}
+		return 0
+	} else if rhs.IsNull() {
+		return 1
+	}
+
+	// Invalid Value-- nothing is set.
+	return 0
+}
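
A small sketch (assuming only the FromJSON, Equals, Compare, Less and ToString helpers above) of the numeric-aware equality that EqualsUsing implements:

package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v3/value"
)

func main() {
	a, err := value.FromJSON([]byte(`{"name":"web","replicas":1}`))
	if err != nil {
		panic(err)
	}
	b, err := value.FromJSON([]byte(`{"replicas":1.0,"name":"web"}`))
	if err != nil {
		panic(err)
	}
	fmt.Println(value.Equals(a, b))  // true: key order is irrelevant and ints/floats compare numerically
	fmt.Println(value.Compare(a, b)) // 0: equal values sort together in the total order
	fmt.Println(value.Less(a, b))    // false
}
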
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/valuereflect.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/valuereflect.go
new file mode 100644
index 00000000..05e70deb
--- /dev/null
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/valuereflect.go
@@ -0,0 +1,294 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package value
+
+import (
+	"encoding/base64"
+	"fmt"
+	"reflect"
+)
+
+// NewValueReflect creates a Value backed by an "interface{}" type,
+// typically a structured object in the Kubernetes world that is exposed via reflection.
+// The provided "interface{}" value must be a pointer so that the value can be modified via reflection.
+// The provided "interface{}" may contain structs and types that are converted to Values
+// by the json.Marshaler interface.
+func NewValueReflect(value interface{}) (Value, error) {
+	if value == nil {
+		return NewValueInterface(nil), nil
+	}
+	v := reflect.ValueOf(value)
+	if v.Kind() != reflect.Ptr {
+		// The root value to reflect on must be a pointer so that map.Set() and map.Delete() operations are possible.
+		return nil, fmt.Errorf("value provided to NewValueReflect must be a pointer")
+	}
+	return wrapValueReflect(v, nil, nil)
+}
+
+// wrapValueReflect wraps the provided reflect.Value as a value. If the parent in the data tree is a map, parentMap
+// and parentMapKey must be provided so that the returned value may be set and deleted.
+func wrapValueReflect(value reflect.Value, parentMap, parentMapKey *reflect.Value) (Value, error) {
+	val := HeapAllocator.allocValueReflect()
+	return val.reuse(value, nil, parentMap, parentMapKey)
+}
+
+// mustWrapValueReflect wraps the provided reflect.Value as a value, and panics if there is an error. If the parent in the data
+// tree is a map, parentMap and parentMapKey must be provided so that the returned value may be set and deleted.
+func mustWrapValueReflect(value reflect.Value, parentMap, parentMapKey *reflect.Value) Value {
+	v, err := wrapValueReflect(value, parentMap, parentMapKey)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// the value interface doesn't care about the type for value.IsNull, so we can use a constant
+var nilType = reflect.TypeOf(&struct{}{})
+
+// reuse replaces the value of the valueReflect. If the parent in the data tree is a map, parentMap and parentMapKey
+// must be provided so that the returned value may be set and deleted.
+func (r *valueReflect) reuse(value reflect.Value, cacheEntry *TypeReflectCacheEntry, parentMap, parentMapKey *reflect.Value) (Value, error) {
+	if cacheEntry == nil {
+		cacheEntry = TypeReflectEntryOf(value.Type())
+	}
+	if cacheEntry.CanConvertToUnstructured() {
+		u, err := cacheEntry.ToUnstructured(value)
+		if err != nil {
+			return nil, err
+		}
+		if u == nil {
+			value = reflect.Zero(nilType)
+		} else {
+			value = reflect.ValueOf(u)
+		}
+	}
+	r.Value = dereference(value)
+	r.ParentMap = parentMap
+	r.ParentMapKey = parentMapKey
+	r.kind = kind(r.Value)
+	return r, nil
+}
+
+// mustReuse replaces the value of the valueReflect and panics if there is an error. If the parent in the data tree is a
+// map, parentMap and parentMapKey must be provided so that the returned value may be set and deleted.
+func (r *valueReflect) mustReuse(value reflect.Value, cacheEntry *TypeReflectCacheEntry, parentMap, parentMapKey *reflect.Value) Value {
+	v, err := r.reuse(value, cacheEntry, parentMap, parentMapKey)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+func dereference(val reflect.Value) reflect.Value {
+	kind := val.Kind()
+	if (kind == reflect.Interface || kind == reflect.Ptr) && !safeIsNil(val) {
+		return val.Elem()
+	}
+	return val
+}
+
+type valueReflect struct {
+	ParentMap    *reflect.Value
+	ParentMapKey *reflect.Value
+	Value        reflect.Value
+	kind         reflectType
+}
+
+func (r valueReflect) IsMap() bool {
+	return r.kind == mapType || r.kind == structMapType
+}
+
+func (r valueReflect) IsList() bool {
+	return r.kind == listType
+}
+
+func (r valueReflect) IsBool() bool {
+	return r.kind == boolType
+}
+
+func (r valueReflect) IsInt() bool {
+	return r.kind == intType || r.kind == uintType
+}
+
+func (r valueReflect) IsFloat() bool {
+	return r.kind == floatType
+}
+
+func (r valueReflect) IsString() bool {
+	return r.kind == stringType || r.kind == byteStringType
+}
+
+func (r valueReflect) IsNull() bool {
+	return r.kind == nullType
+}
+
+type reflectType = int
+
+const (
+	mapType = iota
+	structMapType
+	listType
+	intType
+	uintType
+	floatType
+	stringType
+	byteStringType
+	boolType
+	nullType
+)
+
+func kind(v reflect.Value) reflectType {
+	typ := v.Type()
+	rk := typ.Kind()
+	switch rk {
+	case reflect.Map:
+		if v.IsNil() {
+			return nullType
+		}
+		return mapType
+	case reflect.Struct:
+		return structMapType
+	case reflect.Int, reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8:
+		return intType
+	case reflect.Uint, reflect.Uint32, reflect.Uint16, reflect.Uint8:
+		// Uint64 deliberately excluded, see valueUnstructured.Int.
+		return uintType
+	case reflect.Float64, reflect.Float32:
+		return floatType
+	case reflect.String:
+		return stringType
+	case reflect.Bool:
+		return boolType
+	case reflect.Slice:
+		if v.IsNil() {
+			return nullType
+		}
+		elemKind := typ.Elem().Kind()
+		if elemKind == reflect.Uint8 {
+			return byteStringType
+		}
+		return listType
+	case reflect.Chan, reflect.Func, reflect.Ptr, reflect.UnsafePointer, reflect.Interface:
+		if v.IsNil() {
+			return nullType
+		}
+		panic(fmt.Sprintf("unsupported type: %v", v.Type()))
+	default:
+		panic(fmt.Sprintf("unsupported type: %v", v.Type()))
+	}
+}
+
+// TODO find a cleaner way to avoid panics from reflect.IsNil()
+func safeIsNil(v reflect.Value) bool {
+	k := v.Kind()
+	switch k {
+	case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.UnsafePointer, reflect.Interface, reflect.Slice:
+		return v.IsNil()
+	}
+	return false
+}
+
+func (r valueReflect) AsMap() Map {
+	return r.AsMapUsing(HeapAllocator)
+}
+
+func (r valueReflect) AsMapUsing(a Allocator) Map {
+	switch r.kind {
+	case structMapType:
+		v := a.allocStructReflect()
+		v.valueReflect = r
+		return v
+	case mapType:
+		v := a.allocMapReflect()
+		v.valueReflect = r
+		return v
+	default:
+		panic("value is not a map or struct")
+	}
+}
+
+func (r valueReflect) AsList() List {
+	return r.AsListUsing(HeapAllocator)
+}
+
+func (r valueReflect) AsListUsing(a Allocator) List {
+	if r.IsList() {
+		v := a.allocListReflect()
+		v.Value = r.Value
+		return v
+	}
+	panic("value is not a list")
+}
+
+func (r valueReflect) AsBool() bool {
+	if r.IsBool() {
+		return r.Value.Bool()
+	}
+	panic("value is not a bool")
+}
+
+func (r valueReflect) AsInt() int64 {
+	if r.kind == intType {
+		return r.Value.Int()
+	}
+	if r.kind == uintType {
+		return int64(r.Value.Uint())
+	}
+
+	panic("value is not an int")
+}
+
+func (r valueReflect) AsFloat() float64 {
+	if r.IsFloat() {
+		return r.Value.Float()
+	}
+	panic("value is not a float")
+}
+
+func (r valueReflect) AsString() string {
+	switch r.kind {
+	case stringType:
+		return r.Value.String()
+	case byteStringType:
+		return base64.StdEncoding.EncodeToString(r.Value.Bytes())
+	}
+	panic("value is not a string")
+}
+
+func (r valueReflect) Unstructured() interface{} {
+	val := r.Value
+	switch {
+	case r.IsNull():
+		return nil
+	case val.Kind() == reflect.Struct:
+		return structReflect{r}.Unstructured()
+	case val.Kind() == reflect.Map:
+		return mapReflect{valueReflect: r}.Unstructured()
+	case r.IsList():
+		return listReflect{r.Value}.Unstructured()
+	case r.IsString():
+		return r.AsString()
+	case r.IsInt():
+		return r.AsInt()
+	case r.IsBool():
+		return r.AsBool()
+	case r.IsFloat():
+		return r.AsFloat()
+	default:
+		panic(fmt.Sprintf("value of type %s is not supported by the value reflector", val.Type()))
+	}
+}
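
Another hedged sketch of the reflection-backed Value above: the root must be a pointer so Set/Delete can write back, and []byte fields surface as base64 strings, mirroring their JSON encoding. Config is a made-up type:

package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v3/value"
)

// Config is a made-up struct for illustration.
type Config struct {
	Enabled bool   `json:"enabled"`
	Notes   []byte `json:"notes,omitempty"`
}

func main() {
	if _, err := value.NewValueReflect(Config{}); err != nil {
		fmt.Println("non-pointer rejected:", err)
	}
	v, err := value.NewValueReflect(&Config{Enabled: true, Notes: []byte("hi")})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%#v\n", v.Unstructured()) // map[string]interface {}{"enabled":true, "notes":"aGk="}
}
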
diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/valueunstructured.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/valueunstructured.go
new file mode 100644
index 00000000..ac5a9262
--- /dev/null
+++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/valueunstructured.go
@@ -0,0 +1,178 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package value
+
+import (
+	"fmt"
+)
+
+// NewValueInterface creates a Value backed by an "interface{}" type,
+// typically an unstructured object in the Kubernetes world.
+// interface{} must be one of: map[string]interface{}, map[interface{}]interface{}, []interface{}, int types, float types,
+// string or boolean. Nested interface{} must also be one of these types.
+func NewValueInterface(v interface{}) Value {
+	return Value(HeapAllocator.allocValueUnstructured().reuse(v))
+}
+
+type valueUnstructured struct {
+	Value interface{}
+}
+
+// reuse replaces the value of the valueUnstructured.
+func (vi *valueUnstructured) reuse(value interface{}) Value {
+	vi.Value = value
+	return vi
+}
+
+func (v valueUnstructured) IsMap() bool {
+	if _, ok := v.Value.(map[string]interface{}); ok {
+		return true
+	}
+	if _, ok := v.Value.(map[interface{}]interface{}); ok {
+		return true
+	}
+	return false
+}
+
+func (v valueUnstructured) AsMap() Map {
+	return v.AsMapUsing(HeapAllocator)
+}
+
+func (v valueUnstructured) AsMapUsing(_ Allocator) Map {
+	if v.Value == nil {
+		panic("invalid nil")
+	}
+	switch t := v.Value.(type) {
+	case map[string]interface{}:
+		return mapUnstructuredString(t)
+	case map[interface{}]interface{}:
+		return mapUnstructuredInterface(t)
+	}
+	panic(fmt.Errorf("not a map: %#v", v))
+}
+
+func (v valueUnstructured) IsList() bool {
+	if v.Value == nil {
+		return false
+	}
+	_, ok := v.Value.([]interface{})
+	return ok
+}
+
+func (v valueUnstructured) AsList() List {
+	return v.AsListUsing(HeapAllocator)
+}
+
+func (v valueUnstructured) AsListUsing(_ Allocator) List {
+	return listUnstructured(v.Value.([]interface{}))
+}
+
+func (v valueUnstructured) IsFloat() bool {
+	if v.Value == nil {
+		return false
+	} else if _, ok := v.Value.(float64); ok {
+		return true
+	} else if _, ok := v.Value.(float32); ok {
+		return true
+	}
+	return false
+}
+
+func (v valueUnstructured) AsFloat() float64 {
+	if f, ok := v.Value.(float32); ok {
+		return float64(f)
+	}
+	return v.Value.(float64)
+}
+
+func (v valueUnstructured) IsInt() bool {
+	if v.Value == nil {
+		return false
+	} else if _, ok := v.Value.(int); ok {
+		return true
+	} else if _, ok := v.Value.(int8); ok {
+		return true
+	} else if _, ok := v.Value.(int16); ok {
+		return true
+	} else if _, ok := v.Value.(int32); ok {
+		return true
+	} else if _, ok := v.Value.(int64); ok {
+		return true
+	} else if _, ok := v.Value.(uint); ok {
+		return true
+	} else if _, ok := v.Value.(uint8); ok {
+		return true
+	} else if _, ok := v.Value.(uint16); ok {
+		return true
+	} else if _, ok := v.Value.(uint32); ok {
+		return true
+	}
+	return false
+}
+
+func (v valueUnstructured) AsInt() int64 {
+	if i, ok := v.Value.(int); ok {
+		return int64(i)
+	} else if i, ok := v.Value.(int8); ok {
+		return int64(i)
+	} else if i, ok := v.Value.(int16); ok {
+		return int64(i)
+	} else if i, ok := v.Value.(int32); ok {
+		return int64(i)
+	} else if i, ok := v.Value.(uint); ok {
+		return int64(i)
+	} else if i, ok := v.Value.(uint8); ok {
+		return int64(i)
+	} else if i, ok := v.Value.(uint16); ok {
+		return int64(i)
+	} else if i, ok := v.Value.(uint32); ok {
+		return int64(i)
+	}
+	return v.Value.(int64)
+}
+
+func (v valueUnstructured) IsString() bool {
+	if v.Value == nil {
+		return false
+	}
+	_, ok := v.Value.(string)
+	return ok
+}
+
+func (v valueUnstructured) AsString() string {
+	return v.Value.(string)
+}
+
+func (v valueUnstructured) IsBool() bool {
+	if v.Value == nil {
+		return false
+	}
+	_, ok := v.Value.(bool)
+	return ok
+}
+
+func (v valueUnstructured) AsBool() bool {
+	return v.Value.(bool)
+}
+
+func (v valueUnstructured) IsNull() bool {
+	return v.Value == nil
+}
+
+func (v valueUnstructured) Unstructured() interface{} {
+	return v.Value
+}
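
And the unstructured counterpart, as an illustrative sketch only: NewValueInterface wraps plain Go maps, slices and scalars without reflection, and the same numeric equality applies:

package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v3/value"
)

func main() {
	a := value.NewValueInterface(map[string]interface{}{"replicas": int64(3)})
	b := value.NewValueInterface(map[string]interface{}{"replicas": float64(3)})
	fmt.Println(a.IsMap(), a.AsMap().Has("replicas")) // true true
	fmt.Println(value.Equals(a, b))                   // true: 3 and 3.0 compare equal
	fmt.Println(value.ToString(b))                    // replicas=3
}
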
diff --git a/vendor/sigs.k8s.io/yaml/.travis.yml b/vendor/sigs.k8s.io/yaml/.travis.yml
index 03ddc731..d20e23ef 100644
--- a/vendor/sigs.k8s.io/yaml/.travis.yml
+++ b/vendor/sigs.k8s.io/yaml/.travis.yml
@@ -1,14 +1,13 @@
 language: go
 dist: xenial
 go:
-  - 1.9.x
-  - 1.10.x
-  - 1.11.x
+  - 1.12.x
+  - 1.13.x
 script:
-  - go get -t -v ./...
-  - diff -u <(echo -n) <(gofmt -d .)
+  - diff -u <(echo -n) <(gofmt -d *.go)
   - diff -u <(echo -n) <(golint $(go list -e ./...) | grep -v YAMLToJSON)
-  - go tool vet .
-  - go test -v -race ./...
+  - GO111MODULE=on go vet .
+  - GO111MODULE=on go test -v -race ./...
+  - git diff --exit-code
 install:
-  - go get golang.org/x/lint/golint
+  - GO111MODULE=off go get golang.org/x/lint/golint
diff --git a/vendor/sigs.k8s.io/yaml/OWNERS b/vendor/sigs.k8s.io/yaml/OWNERS
index 11ad7ce1..325b40b0 100644
--- a/vendor/sigs.k8s.io/yaml/OWNERS
+++ b/vendor/sigs.k8s.io/yaml/OWNERS
@@ -1,3 +1,5 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
 approvers:
 - dims
 - lavalamp
diff --git a/vendor/sigs.k8s.io/yaml/README.md b/vendor/sigs.k8s.io/yaml/README.md
index 0200f75b..5a651d91 100644
--- a/vendor/sigs.k8s.io/yaml/README.md
+++ b/vendor/sigs.k8s.io/yaml/README.md
@@ -1,12 +1,14 @@
 # YAML marshaling and unmarshaling support for Go
 
-[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml)
+[![Build Status](https://travis-ci.org/kubernetes-sigs/yaml.svg)](https://travis-ci.org/kubernetes-sigs/yaml)
+
+kubernetes-sigs/yaml is a permanent fork of [ghodss/yaml](https://github.com/ghodss/yaml).
 
 ## Introduction
 
 A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs.
 
-In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/).
+In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://web.archive.org/web/20190603050330/http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/).
 
 ## Compatibility
 
@@ -32,13 +34,13 @@ GOOD:
 To install, run:
 
 ```
-$ go get github.com/ghodss/yaml
+$ go get sigs.k8s.io/yaml
 ```
 
 And import using:
 
 ```
-import "github.com/ghodss/yaml"
+import "sigs.k8s.io/yaml"
 ```
 
 Usage is very similar to the JSON library:
@@ -49,7 +51,7 @@ package main
 import (
 	"fmt"
 
-	"github.com/ghodss/yaml"
+	"sigs.k8s.io/yaml"
 )
 
 type Person struct {
@@ -93,7 +95,7 @@ package main
 import (
 	"fmt"
 
-	"github.com/ghodss/yaml"
+	"sigs.k8s.io/yaml"
 )
 
 func main() {
diff --git a/vendor/sigs.k8s.io/yaml/go.mod b/vendor/sigs.k8s.io/yaml/go.mod
new file mode 100644
index 00000000..7224f349
--- /dev/null
+++ b/vendor/sigs.k8s.io/yaml/go.mod
@@ -0,0 +1,8 @@
+module sigs.k8s.io/yaml
+
+go 1.12
+
+require (
+	github.com/davecgh/go-spew v1.1.1
+	gopkg.in/yaml.v2 v2.2.8
+)
diff --git a/vendor/sigs.k8s.io/yaml/go.sum b/vendor/sigs.k8s.io/yaml/go.sum
new file mode 100644
index 00000000..76e49483
--- /dev/null
+++ b/vendor/sigs.k8s.io/yaml/go.sum
@@ -0,0 +1,9 @@
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo=
+gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/sigs.k8s.io/yaml/yaml.go b/vendor/sigs.k8s.io/yaml/yaml.go
index 02459611..efbc535d 100644
--- a/vendor/sigs.k8s.io/yaml/yaml.go
+++ b/vendor/sigs.k8s.io/yaml/yaml.go
@@ -317,3 +317,64 @@ func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (in
 		return yamlObj, nil
 	}
 }
+
+// JSONObjectToYAMLObject converts an in-memory JSON object into a YAML in-memory MapSlice,
+// without going through a byte representation. A nil or empty map[string]interface{} input is
+// converted to an empty map, i.e. yaml.MapSlice(nil).
+//
+// interface{} slices stay interface{} slices. map[string]interface{} becomes yaml.MapSlice.
+//
+// int64 and float64 are down-cast following the logic of github.com/go-yaml/yaml:
+// - float64s are down-cast as far as possible without data loss to int, int64, uint64.
+// - int64s are down-cast to int if possible without data loss.
+//
+// Big int/int64/uint64 do not lose precision, unlike in the json-yaml roundtripping case.
+//
+// string, bool and any other types are unchanged.
+func JSONObjectToYAMLObject(j map[string]interface{}) yaml.MapSlice {
+	if len(j) == 0 {
+		return nil
+	}
+	ret := make(yaml.MapSlice, 0, len(j))
+	for k, v := range j {
+		ret = append(ret, yaml.MapItem{Key: k, Value: jsonToYAMLValue(v)})
+	}
+	return ret
+}
+
+func jsonToYAMLValue(j interface{}) interface{} {
+	switch j := j.(type) {
+	case map[string]interface{}:
+		if j == nil {
+			return interface{}(nil)
+		}
+		return JSONObjectToYAMLObject(j)
+	case []interface{}:
+		if j == nil {
+			return interface{}(nil)
+		}
+		ret := make([]interface{}, len(j))
+		for i := range j {
+			ret[i] = jsonToYAMLValue(j[i])
+		}
+		return ret
+	case float64:
+		// replicate the logic in https://github.com/go-yaml/yaml/blob/51d6538a90f86fe93ac480b35f37b2be17fef232/resolve.go#L151
+		if i64 := int64(j); j == float64(i64) {
+			if i := int(i64); i64 == int64(i) {
+				return i
+			}
+			return i64
+		}
+		if ui64 := uint64(j); j == float64(ui64) {
+			return ui64
+		}
+		return j
+	case int64:
+		if i := int(j); j == int64(i) {
+			return i
+		}
+		return j
+	}
+	return j
+}
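
Finally, a short hedged sketch of the new JSONObjectToYAMLObject helper: JSON-decoded numbers (float64) are down-cast per the go-yaml rules and nested maps become nested MapSlices, all without a byte-level round trip:

package main

import (
	"fmt"

	"sigs.k8s.io/yaml"
)

func main() {
	in := map[string]interface{}{
		"replicas": float64(3), // JSON numbers normally decode to float64
		"labels":   map[string]interface{}{"app": "web"},
	}
	out := yaml.JSONObjectToYAMLObject(in)
	// out is a gopkg.in/yaml.v2 MapSlice; 3.0 has been down-cast to the int 3
	// and the nested map is now a nested MapSlice.
	fmt.Printf("%#v\n", out)
}
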